/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
27 #include <linux/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
40 #include <linux/vmalloc.h>
41 #include <net/ip6_checksum.h>
45 char qlge_driver_name
[] = DRV_NAME
;
46 const char qlge_driver_version
[] = DRV_VERSION
;
48 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49 MODULE_DESCRIPTION(DRV_STRING
" ");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(DRV_VERSION
);
53 static const u32 default_msg
=
54 NETIF_MSG_DRV
| NETIF_MSG_PROBE
| NETIF_MSG_LINK
|
55 /* NETIF_MSG_TIMER | */
60 /* NETIF_MSG_TX_QUEUED | */
61 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62 /* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW
| NETIF_MSG_WOL
| 0;
65 static int debug
= 0x00007fff; /* defaults above */
66 module_param(debug
, int, 0);
67 MODULE_PARM_DESC(debug
, "Debug level (0=none,...,16=all)");
72 static int irq_type
= MSIX_IRQ
;
73 module_param(irq_type
, int, MSIX_IRQ
);
74 MODULE_PARM_DESC(irq_type
, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76 static struct pci_device_id qlge_pci_tbl
[] __devinitdata
= {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC
, QLGE_DEVICE_ID_8012
)},
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC
, QLGE_DEVICE_ID_8000
)},
79 /* required last entry */
83 MODULE_DEVICE_TABLE(pci
, qlge_pci_tbl
);
85 /* This hardware semaphore causes exclusive access to
86 * resources shared between the NIC driver, MPI firmware,
87 * FCOE firmware and the FC driver.
89 static int ql_sem_trylock(struct ql_adapter
*qdev
, u32 sem_mask
)
95 sem_bits
= SEM_SET
<< SEM_XGMAC0_SHIFT
;
98 sem_bits
= SEM_SET
<< SEM_XGMAC1_SHIFT
;
101 sem_bits
= SEM_SET
<< SEM_ICB_SHIFT
;
103 case SEM_MAC_ADDR_MASK
:
104 sem_bits
= SEM_SET
<< SEM_MAC_ADDR_SHIFT
;
107 sem_bits
= SEM_SET
<< SEM_FLASH_SHIFT
;
110 sem_bits
= SEM_SET
<< SEM_PROBE_SHIFT
;
112 case SEM_RT_IDX_MASK
:
113 sem_bits
= SEM_SET
<< SEM_RT_IDX_SHIFT
;
115 case SEM_PROC_REG_MASK
:
116 sem_bits
= SEM_SET
<< SEM_PROC_REG_SHIFT
;
119 QPRINTK(qdev
, PROBE
, ALERT
, "Bad Semaphore mask!.\n");
123 ql_write32(qdev
, SEM
, sem_bits
| sem_mask
);
124 return !(ql_read32(qdev
, SEM
) & sem_bits
);
127 int ql_sem_spinlock(struct ql_adapter
*qdev
, u32 sem_mask
)
129 unsigned int wait_count
= 30;
131 if (!ql_sem_trylock(qdev
, sem_mask
))
134 } while (--wait_count
);
138 void ql_sem_unlock(struct ql_adapter
*qdev
, u32 sem_mask
)
140 ql_write32(qdev
, SEM
, sem_mask
);
141 ql_read32(qdev
, SEM
); /* flush */
144 /* This function waits for a specific bit to come ready
145 * in a given register. It is used mostly by the initialize
146 * process, but is also used in kernel thread API such as
147 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
149 int ql_wait_reg_rdy(struct ql_adapter
*qdev
, u32 reg
, u32 bit
, u32 err_bit
)
152 int count
= UDELAY_COUNT
;
155 temp
= ql_read32(qdev
, reg
);
157 /* check for errors */
158 if (temp
& err_bit
) {
159 QPRINTK(qdev
, PROBE
, ALERT
,
160 "register 0x%.08x access error, value = 0x%.08x!.\n",
163 } else if (temp
& bit
)
165 udelay(UDELAY_DELAY
);
168 QPRINTK(qdev
, PROBE
, ALERT
,
169 "Timed out waiting for reg %x to come ready.\n", reg
);
173 /* The CFG register is used to download TX and RX control blocks
174 * to the chip. This function waits for an operation to complete.
176 static int ql_wait_cfg(struct ql_adapter
*qdev
, u32 bit
)
178 int count
= UDELAY_COUNT
;
182 temp
= ql_read32(qdev
, CFG
);
187 udelay(UDELAY_DELAY
);
194 /* Used to issue init control blocks to hw. Maps control block,
195 * sets address, triggers download, waits for completion.
197 int ql_write_cfg(struct ql_adapter
*qdev
, void *ptr
, int size
, u32 bit
,
207 (bit
& (CFG_LRQ
| CFG_LR
| CFG_LCQ
)) ? PCI_DMA_TODEVICE
:
210 map
= pci_map_single(qdev
->pdev
, ptr
, size
, direction
);
211 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
212 QPRINTK(qdev
, IFUP
, ERR
, "Couldn't map DMA area.\n");
216 status
= ql_sem_spinlock(qdev
, SEM_ICB_MASK
);
220 status
= ql_wait_cfg(qdev
, bit
);
222 QPRINTK(qdev
, IFUP
, ERR
,
223 "Timed out waiting for CFG to come ready.\n");
227 ql_write32(qdev
, ICB_L
, (u32
) map
);
228 ql_write32(qdev
, ICB_H
, (u32
) (map
>> 32));
230 mask
= CFG_Q_MASK
| (bit
<< 16);
231 value
= bit
| (q_id
<< CFG_Q_SHIFT
);
232 ql_write32(qdev
, CFG
, (mask
| value
));
235 * Wait for the bit to clear after signaling hw.
237 status
= ql_wait_cfg(qdev
, bit
);
239 ql_sem_unlock(qdev
, SEM_ICB_MASK
); /* does flush too */
240 pci_unmap_single(qdev
->pdev
, map
, size
, direction
);
244 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245 int ql_get_mac_addr_reg(struct ql_adapter
*qdev
, u32 type
, u16 index
,
252 case MAC_ADDR_TYPE_MULTI_MAC
:
253 case MAC_ADDR_TYPE_CAM_MAC
:
256 ql_wait_reg_rdy(qdev
,
257 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
260 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
261 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
262 MAC_ADDR_ADR
| MAC_ADDR_RS
| type
); /* type */
264 ql_wait_reg_rdy(qdev
,
265 MAC_ADDR_IDX
, MAC_ADDR_MR
, 0);
268 *value
++ = ql_read32(qdev
, MAC_ADDR_DATA
);
270 ql_wait_reg_rdy(qdev
,
271 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
274 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
275 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
276 MAC_ADDR_ADR
| MAC_ADDR_RS
| type
); /* type */
278 ql_wait_reg_rdy(qdev
,
279 MAC_ADDR_IDX
, MAC_ADDR_MR
, 0);
282 *value
++ = ql_read32(qdev
, MAC_ADDR_DATA
);
283 if (type
== MAC_ADDR_TYPE_CAM_MAC
) {
285 ql_wait_reg_rdy(qdev
,
286 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
289 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
290 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
291 MAC_ADDR_ADR
| MAC_ADDR_RS
| type
); /* type */
293 ql_wait_reg_rdy(qdev
, MAC_ADDR_IDX
,
297 *value
++ = ql_read32(qdev
, MAC_ADDR_DATA
);
301 case MAC_ADDR_TYPE_VLAN
:
302 case MAC_ADDR_TYPE_MULTI_FLTR
:
304 QPRINTK(qdev
, IFUP
, CRIT
,
305 "Address type %d not yet supported.\n", type
);
312 /* Set up a MAC, multicast or VLAN address for the
313 * inbound frame matching.
315 static int ql_set_mac_addr_reg(struct ql_adapter
*qdev
, u8
*addr
, u32 type
,
322 case MAC_ADDR_TYPE_MULTI_MAC
:
324 u32 upper
= (addr
[0] << 8) | addr
[1];
325 u32 lower
= (addr
[2] << 24) | (addr
[3] << 16) |
326 (addr
[4] << 8) | (addr
[5]);
329 ql_wait_reg_rdy(qdev
,
330 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
333 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) |
334 (index
<< MAC_ADDR_IDX_SHIFT
) |
336 ql_write32(qdev
, MAC_ADDR_DATA
, lower
);
338 ql_wait_reg_rdy(qdev
,
339 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
342 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) |
343 (index
<< MAC_ADDR_IDX_SHIFT
) |
346 ql_write32(qdev
, MAC_ADDR_DATA
, upper
);
348 ql_wait_reg_rdy(qdev
,
349 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
354 case MAC_ADDR_TYPE_CAM_MAC
:
357 u32 upper
= (addr
[0] << 8) | addr
[1];
359 (addr
[2] << 24) | (addr
[3] << 16) | (addr
[4] << 8) |
362 QPRINTK(qdev
, IFUP
, DEBUG
,
363 "Adding %s address %pM"
364 " at index %d in the CAM.\n",
366 MAC_ADDR_TYPE_MULTI_MAC
) ? "MULTICAST" :
367 "UNICAST"), addr
, index
);
370 ql_wait_reg_rdy(qdev
,
371 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
374 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
375 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
377 ql_write32(qdev
, MAC_ADDR_DATA
, lower
);
379 ql_wait_reg_rdy(qdev
,
380 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
383 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
384 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
386 ql_write32(qdev
, MAC_ADDR_DATA
, upper
);
388 ql_wait_reg_rdy(qdev
,
389 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
392 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
) | /* offset */
393 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
395 /* This field should also include the queue id
396 and possibly the function id. Right now we hardcode
397 the route field to NIC core.
399 cam_output
= (CAM_OUT_ROUTE_NIC
|
401 func
<< CAM_OUT_FUNC_SHIFT
) |
402 (0 << CAM_OUT_CQ_ID_SHIFT
));
404 cam_output
|= CAM_OUT_RV
;
405 /* route to NIC core */
406 ql_write32(qdev
, MAC_ADDR_DATA
, cam_output
);
409 case MAC_ADDR_TYPE_VLAN
:
411 u32 enable_bit
= *((u32
*) &addr
[0]);
412 /* For VLAN, the addr actually holds a bit that
413 * either enables or disables the vlan id we are
414 * addressing. It's either MAC_ADDR_E on or off.
415 * That's bit-27 we're talking about.
417 QPRINTK(qdev
, IFUP
, INFO
, "%s VLAN ID %d %s the CAM.\n",
418 (enable_bit
? "Adding" : "Removing"),
419 index
, (enable_bit
? "to" : "from"));
422 ql_wait_reg_rdy(qdev
,
423 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
426 ql_write32(qdev
, MAC_ADDR_IDX
, offset
| /* offset */
427 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
429 enable_bit
); /* enable/disable */
432 case MAC_ADDR_TYPE_MULTI_FLTR
:
434 QPRINTK(qdev
, IFUP
, CRIT
,
435 "Address type %d not yet supported.\n", type
);
442 /* Set or clear MAC address in hardware. We sometimes
443 * have to clear it to prevent wrong frame routing
444 * especially in a bonding environment.
446 static int ql_set_mac_addr(struct ql_adapter
*qdev
, int set
)
449 char zero_mac_addr
[ETH_ALEN
];
453 addr
= &qdev
->ndev
->dev_addr
[0];
454 QPRINTK(qdev
, IFUP
, DEBUG
,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
456 addr
[0], addr
[1], addr
[2], addr
[3],
459 memset(zero_mac_addr
, 0, ETH_ALEN
);
460 addr
= &zero_mac_addr
[0];
461 QPRINTK(qdev
, IFUP
, DEBUG
,
462 "Clearing MAC address on %s\n",
465 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
468 status
= ql_set_mac_addr_reg(qdev
, (u8
*) addr
,
469 MAC_ADDR_TYPE_CAM_MAC
, qdev
->func
* MAX_CQ
);
470 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
472 QPRINTK(qdev
, IFUP
, ERR
, "Failed to init mac "
477 void ql_link_on(struct ql_adapter
*qdev
)
479 QPRINTK(qdev
, LINK
, ERR
, "%s: Link is up.\n",
481 netif_carrier_on(qdev
->ndev
);
482 ql_set_mac_addr(qdev
, 1);
485 void ql_link_off(struct ql_adapter
*qdev
)
487 QPRINTK(qdev
, LINK
, ERR
, "%s: Link is down.\n",
489 netif_carrier_off(qdev
->ndev
);
490 ql_set_mac_addr(qdev
, 0);
493 /* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
496 int ql_get_routing_reg(struct ql_adapter
*qdev
, u32 index
, u32
*value
)
500 status
= ql_wait_reg_rdy(qdev
, RT_IDX
, RT_IDX_MW
, 0);
504 ql_write32(qdev
, RT_IDX
,
505 RT_IDX_TYPE_NICQ
| RT_IDX_RS
| (index
<< RT_IDX_IDX_SHIFT
));
506 status
= ql_wait_reg_rdy(qdev
, RT_IDX
, RT_IDX_MR
, 0);
509 *value
= ql_read32(qdev
, RT_DATA
);
514 /* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
519 static int ql_set_routing_reg(struct ql_adapter
*qdev
, u32 index
, u32 mask
,
522 int status
= -EINVAL
; /* Return error if no mask match. */
525 QPRINTK(qdev
, IFUP
, DEBUG
,
526 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
527 (enable
? "Adding" : "Removing"),
528 ((index
== RT_IDX_ALL_ERR_SLOT
) ? "MAC ERROR/ALL ERROR" : ""),
529 ((index
== RT_IDX_IP_CSUM_ERR_SLOT
) ? "IP CSUM ERROR" : ""),
531 RT_IDX_TCP_UDP_CSUM_ERR_SLOT
) ? "TCP/UDP CSUM ERROR" : ""),
532 ((index
== RT_IDX_BCAST_SLOT
) ? "BROADCAST" : ""),
533 ((index
== RT_IDX_MCAST_MATCH_SLOT
) ? "MULTICAST MATCH" : ""),
534 ((index
== RT_IDX_ALLMULTI_SLOT
) ? "ALL MULTICAST MATCH" : ""),
535 ((index
== RT_IDX_UNUSED6_SLOT
) ? "UNUSED6" : ""),
536 ((index
== RT_IDX_UNUSED7_SLOT
) ? "UNUSED7" : ""),
537 ((index
== RT_IDX_RSS_MATCH_SLOT
) ? "RSS ALL/IPV4 MATCH" : ""),
538 ((index
== RT_IDX_RSS_IPV6_SLOT
) ? "RSS IPV6" : ""),
539 ((index
== RT_IDX_RSS_TCP4_SLOT
) ? "RSS TCP4" : ""),
540 ((index
== RT_IDX_RSS_TCP6_SLOT
) ? "RSS TCP6" : ""),
541 ((index
== RT_IDX_CAM_HIT_SLOT
) ? "CAM HIT" : ""),
542 ((index
== RT_IDX_UNUSED013
) ? "UNUSED13" : ""),
543 ((index
== RT_IDX_UNUSED014
) ? "UNUSED14" : ""),
544 ((index
== RT_IDX_PROMISCUOUS_SLOT
) ? "PROMISCUOUS" : ""),
545 (enable
? "to" : "from"));
550 value
= RT_IDX_DST_CAM_Q
| /* dest */
551 RT_IDX_TYPE_NICQ
| /* type */
552 (RT_IDX_CAM_HIT_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
555 case RT_IDX_VALID
: /* Promiscuous Mode frames. */
557 value
= RT_IDX_DST_DFLT_Q
| /* dest */
558 RT_IDX_TYPE_NICQ
| /* type */
559 (RT_IDX_PROMISCUOUS_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
562 case RT_IDX_ERR
: /* Pass up MAC,IP,TCP/UDP error frames. */
564 value
= RT_IDX_DST_DFLT_Q
| /* dest */
565 RT_IDX_TYPE_NICQ
| /* type */
566 (RT_IDX_ALL_ERR_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
569 case RT_IDX_BCAST
: /* Pass up Broadcast frames to default Q. */
571 value
= RT_IDX_DST_DFLT_Q
| /* dest */
572 RT_IDX_TYPE_NICQ
| /* type */
573 (RT_IDX_BCAST_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
576 case RT_IDX_MCAST
: /* Pass up All Multicast frames. */
578 value
= RT_IDX_DST_DFLT_Q
| /* dest */
579 RT_IDX_TYPE_NICQ
| /* type */
580 (RT_IDX_ALLMULTI_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
583 case RT_IDX_MCAST_MATCH
: /* Pass up matched Multicast frames. */
585 value
= RT_IDX_DST_DFLT_Q
| /* dest */
586 RT_IDX_TYPE_NICQ
| /* type */
587 (RT_IDX_MCAST_MATCH_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
590 case RT_IDX_RSS_MATCH
: /* Pass up matched RSS frames. */
592 value
= RT_IDX_DST_RSS
| /* dest */
593 RT_IDX_TYPE_NICQ
| /* type */
594 (RT_IDX_RSS_MATCH_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
597 case 0: /* Clear the E-bit on an entry. */
599 value
= RT_IDX_DST_DFLT_Q
| /* dest */
600 RT_IDX_TYPE_NICQ
| /* type */
601 (index
<< RT_IDX_IDX_SHIFT
);/* index */
605 QPRINTK(qdev
, IFUP
, ERR
, "Mask type %d not yet supported.\n",
612 status
= ql_wait_reg_rdy(qdev
, RT_IDX
, RT_IDX_MW
, 0);
615 value
|= (enable
? RT_IDX_E
: 0);
616 ql_write32(qdev
, RT_IDX
, value
);
617 ql_write32(qdev
, RT_DATA
, enable
? mask
: 0);
623 static void ql_enable_interrupts(struct ql_adapter
*qdev
)
625 ql_write32(qdev
, INTR_EN
, (INTR_EN_EI
<< 16) | INTR_EN_EI
);
628 static void ql_disable_interrupts(struct ql_adapter
*qdev
)
630 ql_write32(qdev
, INTR_EN
, (INTR_EN_EI
<< 16));
633 /* If we're running with multiple MSI-X vectors then we enable on the fly.
634 * Otherwise, we may have multiple outstanding workers and don't want to
635 * enable until the last one finishes. In this case, the irq_cnt gets
636 * incremented everytime we queue a worker and decremented everytime
637 * a worker finishes. Once it hits zero we enable the interrupt.
639 u32
ql_enable_completion_interrupt(struct ql_adapter
*qdev
, u32 intr
)
642 unsigned long hw_flags
= 0;
643 struct intr_context
*ctx
= qdev
->intr_context
+ intr
;
645 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
) && intr
)) {
646 /* Always enable if we're MSIX multi interrupts and
647 * it's not the default (zeroeth) interrupt.
649 ql_write32(qdev
, INTR_EN
,
651 var
= ql_read32(qdev
, STS
);
655 spin_lock_irqsave(&qdev
->hw_lock
, hw_flags
);
656 if (atomic_dec_and_test(&ctx
->irq_cnt
)) {
657 ql_write32(qdev
, INTR_EN
,
659 var
= ql_read32(qdev
, STS
);
661 spin_unlock_irqrestore(&qdev
->hw_lock
, hw_flags
);
665 static u32
ql_disable_completion_interrupt(struct ql_adapter
*qdev
, u32 intr
)
668 struct intr_context
*ctx
;
670 /* HW disables for us if we're MSIX multi interrupts and
671 * it's not the default (zeroeth) interrupt.
673 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
) && intr
))
676 ctx
= qdev
->intr_context
+ intr
;
677 spin_lock(&qdev
->hw_lock
);
678 if (!atomic_read(&ctx
->irq_cnt
)) {
679 ql_write32(qdev
, INTR_EN
,
681 var
= ql_read32(qdev
, STS
);
683 atomic_inc(&ctx
->irq_cnt
);
684 spin_unlock(&qdev
->hw_lock
);
688 static void ql_enable_all_completion_interrupts(struct ql_adapter
*qdev
)
691 for (i
= 0; i
< qdev
->intr_count
; i
++) {
692 /* The enable call does a atomic_dec_and_test
693 * and enables only if the result is zero.
694 * So we precharge it here.
696 if (unlikely(!test_bit(QL_MSIX_ENABLED
, &qdev
->flags
) ||
698 atomic_set(&qdev
->intr_context
[i
].irq_cnt
, 1);
699 ql_enable_completion_interrupt(qdev
, i
);
704 static int ql_validate_flash(struct ql_adapter
*qdev
, u32 size
, const char *str
)
708 __le16
*flash
= (__le16
*)&qdev
->flash
;
710 status
= strncmp((char *)&qdev
->flash
, str
, 4);
712 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash signature.\n");
716 for (i
= 0; i
< size
; i
++)
717 csum
+= le16_to_cpu(*flash
++);
720 QPRINTK(qdev
, IFUP
, ERR
,
721 "Invalid flash checksum, csum = 0x%.04x.\n", csum
);
726 static int ql_read_flash_word(struct ql_adapter
*qdev
, int offset
, __le32
*data
)
729 /* wait for reg to come ready */
730 status
= ql_wait_reg_rdy(qdev
,
731 FLASH_ADDR
, FLASH_ADDR_RDY
, FLASH_ADDR_ERR
);
734 /* set up for reg read */
735 ql_write32(qdev
, FLASH_ADDR
, FLASH_ADDR_R
| offset
);
736 /* wait for reg to come ready */
737 status
= ql_wait_reg_rdy(qdev
,
738 FLASH_ADDR
, FLASH_ADDR_RDY
, FLASH_ADDR_ERR
);
741 /* This data is stored on flash as an array of
742 * __le32. Since ql_read32() returns cpu endian
743 * we need to swap it back.
745 *data
= cpu_to_le32(ql_read32(qdev
, FLASH_DATA
));
750 static int ql_get_8000_flash_params(struct ql_adapter
*qdev
)
754 __le32
*p
= (__le32
*)&qdev
->flash
;
758 /* Get flash offset for function and adjust
762 offset
= FUNC0_FLASH_OFFSET
/ sizeof(u32
);
764 offset
= FUNC1_FLASH_OFFSET
/ sizeof(u32
);
766 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
769 size
= sizeof(struct flash_params_8000
) / sizeof(u32
);
770 for (i
= 0; i
< size
; i
++, p
++) {
771 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
773 QPRINTK(qdev
, IFUP
, ERR
, "Error reading flash.\n");
778 status
= ql_validate_flash(qdev
,
779 sizeof(struct flash_params_8000
) / sizeof(u16
),
782 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash.\n");
787 /* Extract either manufacturer or BOFM modified
790 if (qdev
->flash
.flash_params_8000
.data_type1
== 2)
792 qdev
->flash
.flash_params_8000
.mac_addr1
,
793 qdev
->ndev
->addr_len
);
796 qdev
->flash
.flash_params_8000
.mac_addr
,
797 qdev
->ndev
->addr_len
);
799 if (!is_valid_ether_addr(mac_addr
)) {
800 QPRINTK(qdev
, IFUP
, ERR
, "Invalid MAC address.\n");
805 memcpy(qdev
->ndev
->dev_addr
,
807 qdev
->ndev
->addr_len
);
810 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
814 static int ql_get_8012_flash_params(struct ql_adapter
*qdev
)
818 __le32
*p
= (__le32
*)&qdev
->flash
;
820 u32 size
= sizeof(struct flash_params_8012
) / sizeof(u32
);
822 /* Second function's parameters follow the first
828 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
831 for (i
= 0; i
< size
; i
++, p
++) {
832 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
834 QPRINTK(qdev
, IFUP
, ERR
, "Error reading flash.\n");
840 status
= ql_validate_flash(qdev
,
841 sizeof(struct flash_params_8012
) / sizeof(u16
),
844 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash.\n");
849 if (!is_valid_ether_addr(qdev
->flash
.flash_params_8012
.mac_addr
)) {
854 memcpy(qdev
->ndev
->dev_addr
,
855 qdev
->flash
.flash_params_8012
.mac_addr
,
856 qdev
->ndev
->addr_len
);
859 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
863 /* xgmac register are located behind the xgmac_addr and xgmac_data
864 * register pair. Each read/write requires us to wait for the ready
865 * bit before reading/writing the data.
867 static int ql_write_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32 data
)
870 /* wait for reg to come ready */
871 status
= ql_wait_reg_rdy(qdev
,
872 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
875 /* write the data to the data reg */
876 ql_write32(qdev
, XGMAC_DATA
, data
);
877 /* trigger the write */
878 ql_write32(qdev
, XGMAC_ADDR
, reg
);
882 /* xgmac register are located behind the xgmac_addr and xgmac_data
883 * register pair. Each read/write requires us to wait for the ready
884 * bit before reading/writing the data.
886 int ql_read_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32
*data
)
889 /* wait for reg to come ready */
890 status
= ql_wait_reg_rdy(qdev
,
891 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
894 /* set up for reg read */
895 ql_write32(qdev
, XGMAC_ADDR
, reg
| XGMAC_ADDR_R
);
896 /* wait for reg to come ready */
897 status
= ql_wait_reg_rdy(qdev
,
898 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
902 *data
= ql_read32(qdev
, XGMAC_DATA
);
907 /* This is used for reading the 64-bit statistics regs. */
908 int ql_read_xgmac_reg64(struct ql_adapter
*qdev
, u32 reg
, u64
*data
)
914 status
= ql_read_xgmac_reg(qdev
, reg
, &lo
);
918 status
= ql_read_xgmac_reg(qdev
, reg
+ 4, &hi
);
922 *data
= (u64
) lo
| ((u64
) hi
<< 32);
928 static int ql_8000_port_initialize(struct ql_adapter
*qdev
)
932 * Get MPI firmware version for driver banner
935 status
= ql_mb_about_fw(qdev
);
938 status
= ql_mb_get_fw_state(qdev
);
941 /* Wake up a worker to get/set the TX/RX frame sizes. */
942 queue_delayed_work(qdev
->workqueue
, &qdev
->mpi_port_cfg_work
, 0);
947 /* Take the MAC Core out of reset.
948 * Enable statistics counting.
949 * Take the transmitter/receiver out of reset.
950 * This functionality may be done in the MPI firmware at a
953 static int ql_8012_port_initialize(struct ql_adapter
*qdev
)
958 if (ql_sem_trylock(qdev
, qdev
->xg_sem_mask
)) {
959 /* Another function has the semaphore, so
960 * wait for the port init bit to come ready.
962 QPRINTK(qdev
, LINK
, INFO
,
963 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964 status
= ql_wait_reg_rdy(qdev
, STS
, qdev
->port_init
, 0);
966 QPRINTK(qdev
, LINK
, CRIT
,
967 "Port initialize timed out.\n");
972 QPRINTK(qdev
, LINK
, INFO
, "Got xgmac semaphore!.\n");
973 /* Set the core reset. */
974 status
= ql_read_xgmac_reg(qdev
, GLOBAL_CFG
, &data
);
977 data
|= GLOBAL_CFG_RESET
;
978 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
982 /* Clear the core reset and turn on jumbo for receiver. */
983 data
&= ~GLOBAL_CFG_RESET
; /* Clear core reset. */
984 data
|= GLOBAL_CFG_JUMBO
; /* Turn on jumbo. */
985 data
|= GLOBAL_CFG_TX_STAT_EN
;
986 data
|= GLOBAL_CFG_RX_STAT_EN
;
987 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
991 /* Enable transmitter, and clear it's reset. */
992 status
= ql_read_xgmac_reg(qdev
, TX_CFG
, &data
);
995 data
&= ~TX_CFG_RESET
; /* Clear the TX MAC reset. */
996 data
|= TX_CFG_EN
; /* Enable the transmitter. */
997 status
= ql_write_xgmac_reg(qdev
, TX_CFG
, data
);
1001 /* Enable receiver and clear it's reset. */
1002 status
= ql_read_xgmac_reg(qdev
, RX_CFG
, &data
);
1005 data
&= ~RX_CFG_RESET
; /* Clear the RX MAC reset. */
1006 data
|= RX_CFG_EN
; /* Enable the receiver. */
1007 status
= ql_write_xgmac_reg(qdev
, RX_CFG
, data
);
1011 /* Turn on jumbo. */
1013 ql_write_xgmac_reg(qdev
, MAC_TX_PARAMS
, MAC_TX_PARAMS_JUMBO
| (0x2580 << 16));
1017 ql_write_xgmac_reg(qdev
, MAC_RX_PARAMS
, 0x2580);
1021 /* Signal to the world that the port is enabled. */
1022 ql_write32(qdev
, STS
, ((qdev
->port_init
<< 16) | qdev
->port_init
));
1024 ql_sem_unlock(qdev
, qdev
->xg_sem_mask
);
1028 static inline unsigned int ql_lbq_block_size(struct ql_adapter
*qdev
)
1030 return PAGE_SIZE
<< qdev
->lbq_buf_order
;
1033 /* Get the next large buffer. */
1034 static struct bq_desc
*ql_get_curr_lbuf(struct rx_ring
*rx_ring
)
1036 struct bq_desc
*lbq_desc
= &rx_ring
->lbq
[rx_ring
->lbq_curr_idx
];
1037 rx_ring
->lbq_curr_idx
++;
1038 if (rx_ring
->lbq_curr_idx
== rx_ring
->lbq_len
)
1039 rx_ring
->lbq_curr_idx
= 0;
1040 rx_ring
->lbq_free_cnt
++;
1044 static struct bq_desc
*ql_get_curr_lchunk(struct ql_adapter
*qdev
,
1045 struct rx_ring
*rx_ring
)
1047 struct bq_desc
*lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1049 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1050 pci_unmap_addr(lbq_desc
, mapaddr
),
1051 rx_ring
->lbq_buf_size
,
1052 PCI_DMA_FROMDEVICE
);
1054 /* If it's the last chunk of our master page then
1057 if ((lbq_desc
->p
.pg_chunk
.offset
+ rx_ring
->lbq_buf_size
)
1058 == ql_lbq_block_size(qdev
))
1059 pci_unmap_page(qdev
->pdev
,
1060 lbq_desc
->p
.pg_chunk
.map
,
1061 ql_lbq_block_size(qdev
),
1062 PCI_DMA_FROMDEVICE
);
1066 /* Get the next small buffer. */
1067 static struct bq_desc
*ql_get_curr_sbuf(struct rx_ring
*rx_ring
)
1069 struct bq_desc
*sbq_desc
= &rx_ring
->sbq
[rx_ring
->sbq_curr_idx
];
1070 rx_ring
->sbq_curr_idx
++;
1071 if (rx_ring
->sbq_curr_idx
== rx_ring
->sbq_len
)
1072 rx_ring
->sbq_curr_idx
= 0;
1073 rx_ring
->sbq_free_cnt
++;
1077 /* Update an rx ring index. */
1078 static void ql_update_cq(struct rx_ring
*rx_ring
)
1080 rx_ring
->cnsmr_idx
++;
1081 rx_ring
->curr_entry
++;
1082 if (unlikely(rx_ring
->cnsmr_idx
== rx_ring
->cq_len
)) {
1083 rx_ring
->cnsmr_idx
= 0;
1084 rx_ring
->curr_entry
= rx_ring
->cq_base
;
1088 static void ql_write_cq_idx(struct rx_ring
*rx_ring
)
1090 ql_write_db_reg(rx_ring
->cnsmr_idx
, rx_ring
->cnsmr_idx_db_reg
);
1093 static int ql_get_next_chunk(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
,
1094 struct bq_desc
*lbq_desc
)
1096 if (!rx_ring
->pg_chunk
.page
) {
1098 rx_ring
->pg_chunk
.page
= alloc_pages(__GFP_COLD
| __GFP_COMP
|
1100 qdev
->lbq_buf_order
);
1101 if (unlikely(!rx_ring
->pg_chunk
.page
)) {
1102 QPRINTK(qdev
, DRV
, ERR
,
1103 "page allocation failed.\n");
1106 rx_ring
->pg_chunk
.offset
= 0;
1107 map
= pci_map_page(qdev
->pdev
, rx_ring
->pg_chunk
.page
,
1108 0, ql_lbq_block_size(qdev
),
1109 PCI_DMA_FROMDEVICE
);
1110 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1111 __free_pages(rx_ring
->pg_chunk
.page
,
1112 qdev
->lbq_buf_order
);
1113 QPRINTK(qdev
, DRV
, ERR
,
1114 "PCI mapping failed.\n");
1117 rx_ring
->pg_chunk
.map
= map
;
1118 rx_ring
->pg_chunk
.va
= page_address(rx_ring
->pg_chunk
.page
);
1121 /* Copy the current master pg_chunk info
1122 * to the current descriptor.
1124 lbq_desc
->p
.pg_chunk
= rx_ring
->pg_chunk
;
1126 /* Adjust the master page chunk for next
1129 rx_ring
->pg_chunk
.offset
+= rx_ring
->lbq_buf_size
;
1130 if (rx_ring
->pg_chunk
.offset
== ql_lbq_block_size(qdev
)) {
1131 rx_ring
->pg_chunk
.page
= NULL
;
1132 lbq_desc
->p
.pg_chunk
.last_flag
= 1;
1134 rx_ring
->pg_chunk
.va
+= rx_ring
->lbq_buf_size
;
1135 get_page(rx_ring
->pg_chunk
.page
);
1136 lbq_desc
->p
.pg_chunk
.last_flag
= 0;
1140 /* Process (refill) a large buffer queue. */
1141 static void ql_update_lbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1143 u32 clean_idx
= rx_ring
->lbq_clean_idx
;
1144 u32 start_idx
= clean_idx
;
1145 struct bq_desc
*lbq_desc
;
1149 while (rx_ring
->lbq_free_cnt
> 32) {
1150 for (i
= 0; i
< 16; i
++) {
1151 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1152 "lbq: try cleaning clean_idx = %d.\n",
1154 lbq_desc
= &rx_ring
->lbq
[clean_idx
];
1155 if (ql_get_next_chunk(qdev
, rx_ring
, lbq_desc
)) {
1156 QPRINTK(qdev
, IFUP
, ERR
,
1157 "Could not get a page chunk.\n");
1161 map
= lbq_desc
->p
.pg_chunk
.map
+
1162 lbq_desc
->p
.pg_chunk
.offset
;
1163 pci_unmap_addr_set(lbq_desc
, mapaddr
, map
);
1164 pci_unmap_len_set(lbq_desc
, maplen
,
1165 rx_ring
->lbq_buf_size
);
1166 *lbq_desc
->addr
= cpu_to_le64(map
);
1168 pci_dma_sync_single_for_device(qdev
->pdev
, map
,
1169 rx_ring
->lbq_buf_size
,
1170 PCI_DMA_FROMDEVICE
);
1172 if (clean_idx
== rx_ring
->lbq_len
)
1176 rx_ring
->lbq_clean_idx
= clean_idx
;
1177 rx_ring
->lbq_prod_idx
+= 16;
1178 if (rx_ring
->lbq_prod_idx
== rx_ring
->lbq_len
)
1179 rx_ring
->lbq_prod_idx
= 0;
1180 rx_ring
->lbq_free_cnt
-= 16;
1183 if (start_idx
!= clean_idx
) {
1184 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1185 "lbq: updating prod idx = %d.\n",
1186 rx_ring
->lbq_prod_idx
);
1187 ql_write_db_reg(rx_ring
->lbq_prod_idx
,
1188 rx_ring
->lbq_prod_idx_db_reg
);
1192 /* Process (refill) a small buffer queue. */
1193 static void ql_update_sbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1195 u32 clean_idx
= rx_ring
->sbq_clean_idx
;
1196 u32 start_idx
= clean_idx
;
1197 struct bq_desc
*sbq_desc
;
1201 while (rx_ring
->sbq_free_cnt
> 16) {
1202 for (i
= 0; i
< 16; i
++) {
1203 sbq_desc
= &rx_ring
->sbq
[clean_idx
];
1204 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1205 "sbq: try cleaning clean_idx = %d.\n",
1207 if (sbq_desc
->p
.skb
== NULL
) {
1208 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1209 "sbq: getting new skb for index %d.\n",
1212 netdev_alloc_skb(qdev
->ndev
,
1214 if (sbq_desc
->p
.skb
== NULL
) {
1215 QPRINTK(qdev
, PROBE
, ERR
,
1216 "Couldn't get an skb.\n");
1217 rx_ring
->sbq_clean_idx
= clean_idx
;
1220 skb_reserve(sbq_desc
->p
.skb
, QLGE_SB_PAD
);
1221 map
= pci_map_single(qdev
->pdev
,
1222 sbq_desc
->p
.skb
->data
,
1223 rx_ring
->sbq_buf_size
,
1224 PCI_DMA_FROMDEVICE
);
1225 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1226 QPRINTK(qdev
, IFUP
, ERR
, "PCI mapping failed.\n");
1227 rx_ring
->sbq_clean_idx
= clean_idx
;
1228 dev_kfree_skb_any(sbq_desc
->p
.skb
);
1229 sbq_desc
->p
.skb
= NULL
;
1232 pci_unmap_addr_set(sbq_desc
, mapaddr
, map
);
1233 pci_unmap_len_set(sbq_desc
, maplen
,
1234 rx_ring
->sbq_buf_size
);
1235 *sbq_desc
->addr
= cpu_to_le64(map
);
1239 if (clean_idx
== rx_ring
->sbq_len
)
1242 rx_ring
->sbq_clean_idx
= clean_idx
;
1243 rx_ring
->sbq_prod_idx
+= 16;
1244 if (rx_ring
->sbq_prod_idx
== rx_ring
->sbq_len
)
1245 rx_ring
->sbq_prod_idx
= 0;
1246 rx_ring
->sbq_free_cnt
-= 16;
1249 if (start_idx
!= clean_idx
) {
1250 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring
->sbq_prod_idx
);
1253 ql_write_db_reg(rx_ring
->sbq_prod_idx
,
1254 rx_ring
->sbq_prod_idx_db_reg
);
/* Refill both buffer queues (small first, then large) for one ring. */
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
1265 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1268 static void ql_unmap_send(struct ql_adapter
*qdev
,
1269 struct tx_ring_desc
*tx_ring_desc
, int mapped
)
1272 for (i
= 0; i
< mapped
; i
++) {
1273 if (i
== 0 || (i
== 7 && mapped
> 7)) {
1275 * Unmap the skb->data area, or the
1276 * external sglist (AKA the Outbound
1277 * Address List (OAL)).
1278 * If its the zeroeth element, then it's
1279 * the skb->data area. If it's the 7th
1280 * element and there is more than 6 frags,
1284 QPRINTK(qdev
, TX_DONE
, DEBUG
,
1285 "unmapping OAL area.\n");
1287 pci_unmap_single(qdev
->pdev
,
1288 pci_unmap_addr(&tx_ring_desc
->map
[i
],
1290 pci_unmap_len(&tx_ring_desc
->map
[i
],
1294 QPRINTK(qdev
, TX_DONE
, DEBUG
, "unmapping frag %d.\n",
1296 pci_unmap_page(qdev
->pdev
,
1297 pci_unmap_addr(&tx_ring_desc
->map
[i
],
1299 pci_unmap_len(&tx_ring_desc
->map
[i
],
1300 maplen
), PCI_DMA_TODEVICE
);
1306 /* Map the buffers for this transmit. This will return
1307 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309 static int ql_map_send(struct ql_adapter
*qdev
,
1310 struct ob_mac_iocb_req
*mac_iocb_ptr
,
1311 struct sk_buff
*skb
, struct tx_ring_desc
*tx_ring_desc
)
1313 int len
= skb_headlen(skb
);
1315 int frag_idx
, err
, map_idx
= 0;
1316 struct tx_buf_desc
*tbd
= mac_iocb_ptr
->tbd
;
1317 int frag_cnt
= skb_shinfo(skb
)->nr_frags
;
1320 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "frag_cnt = %d.\n", frag_cnt
);
1323 * Map the skb buffer first.
1325 map
= pci_map_single(qdev
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
1327 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1329 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1330 "PCI mapping failed with error: %d\n", err
);
1332 return NETDEV_TX_BUSY
;
1335 tbd
->len
= cpu_to_le32(len
);
1336 tbd
->addr
= cpu_to_le64(map
);
1337 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1338 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
, len
);
1342 * This loop fills the remainder of the 8 address descriptors
1343 * in the IOCB. If there are more than 7 fragments, then the
1344 * eighth address desc will point to an external list (OAL).
1345 * When this happens, the remainder of the frags will be stored
1348 for (frag_idx
= 0; frag_idx
< frag_cnt
; frag_idx
++, map_idx
++) {
1349 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[frag_idx
];
1351 if (frag_idx
== 6 && frag_cnt
> 7) {
1352 /* Let's tack on an sglist.
1353 * Our control block will now
1355 * iocb->seg[0] = skb->data
1356 * iocb->seg[1] = frag[0]
1357 * iocb->seg[2] = frag[1]
1358 * iocb->seg[3] = frag[2]
1359 * iocb->seg[4] = frag[3]
1360 * iocb->seg[5] = frag[4]
1361 * iocb->seg[6] = frag[5]
1362 * iocb->seg[7] = ptr to OAL (external sglist)
1363 * oal->seg[0] = frag[6]
1364 * oal->seg[1] = frag[7]
1365 * oal->seg[2] = frag[8]
1366 * oal->seg[3] = frag[9]
1367 * oal->seg[4] = frag[10]
1370 /* Tack on the OAL in the eighth segment of IOCB. */
1371 map
= pci_map_single(qdev
->pdev
, &tx_ring_desc
->oal
,
1374 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1376 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1377 "PCI mapping outbound address list with error: %d\n",
1382 tbd
->addr
= cpu_to_le64(map
);
1384 * The length is the number of fragments
1385 * that remain to be mapped times the length
1386 * of our sglist (OAL).
1389 cpu_to_le32((sizeof(struct tx_buf_desc
) *
1390 (frag_cnt
- frag_idx
)) | TX_DESC_C
);
1391 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
,
1393 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1394 sizeof(struct oal
));
1395 tbd
= (struct tx_buf_desc
*)&tx_ring_desc
->oal
;
1400 pci_map_page(qdev
->pdev
, frag
->page
,
1401 frag
->page_offset
, frag
->size
,
1404 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1406 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1407 "PCI mapping frags failed with error: %d.\n",
1412 tbd
->addr
= cpu_to_le64(map
);
1413 tbd
->len
= cpu_to_le32(frag
->size
);
1414 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1415 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc
->map_cnt
= map_idx
;
1421 /* Terminate the last segment. */
1422 tbd
->len
= cpu_to_le32(le32_to_cpu(tbd
->len
) | TX_DESC_E
);
1423 return NETDEV_TX_OK
;
1427 * If the first frag mapping failed, then i will be zero.
1428 * This causes the unmap of the skb->data area. Otherwise
1429 * we pass in the number of frags that mapped successfully
1430 * so they can be umapped.
1432 ql_unmap_send(qdev
, tx_ring_desc
, map_idx
);
1433 return NETDEV_TX_BUSY
;
/* Undo the QLGE_SB_PAD reserve done before a small buffer was given to
 * hardware, and realign the received data on a NET_IP_ALIGN (2-byte)
 * boundary by copying it back over itself.
 */
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	/* NOTE(review): the length argument below was missing from this
	 * copy and is restored from the upstream driver — confirm.
	 */
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}
1451 * This function builds an skb for the given inbound
1452 * completion. It will be rewritten for readability in the near
1453 * future, but for not it works well.
1455 static struct sk_buff
*ql_build_rx_skb(struct ql_adapter
*qdev
,
1456 struct rx_ring
*rx_ring
,
1457 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1459 struct bq_desc
*lbq_desc
;
1460 struct bq_desc
*sbq_desc
;
1461 struct sk_buff
*skb
= NULL
;
1462 u32 length
= le32_to_cpu(ib_mac_rsp
->data_len
);
1463 u32 hdr_len
= le32_to_cpu(ib_mac_rsp
->hdr_len
);
1466 * Handle the header buffer if present.
1468 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HV
&&
1469 ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1470 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Header of %d bytes in small buffer.\n", hdr_len
);
1472 * Headers fit nicely into a small buffer.
1474 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1475 pci_unmap_single(qdev
->pdev
,
1476 pci_unmap_addr(sbq_desc
, mapaddr
),
1477 pci_unmap_len(sbq_desc
, maplen
),
1478 PCI_DMA_FROMDEVICE
);
1479 skb
= sbq_desc
->p
.skb
;
1480 ql_realign_skb(skb
, hdr_len
);
1481 skb_put(skb
, hdr_len
);
1482 sbq_desc
->p
.skb
= NULL
;
1486 * Handle the data buffer(s).
1488 if (unlikely(!length
)) { /* Is there data too? */
1489 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1490 "No Data buffer in this packet.\n");
1494 if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DS
) {
1495 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1496 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1497 "Headers in small, data of %d bytes in small, combine them.\n", length
);
1499 * Data is less than small buffer size so it's
1500 * stuffed in a small buffer.
1501 * For this case we append the data
1502 * from the "data" small buffer to the "header" small
1505 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1506 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1508 (sbq_desc
, mapaddr
),
1511 PCI_DMA_FROMDEVICE
);
1512 memcpy(skb_put(skb
, length
),
1513 sbq_desc
->p
.skb
->data
, length
);
1514 pci_dma_sync_single_for_device(qdev
->pdev
,
1521 PCI_DMA_FROMDEVICE
);
1523 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1524 "%d bytes in a single small buffer.\n", length
);
1525 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1526 skb
= sbq_desc
->p
.skb
;
1527 ql_realign_skb(skb
, length
);
1528 skb_put(skb
, length
);
1529 pci_unmap_single(qdev
->pdev
,
1530 pci_unmap_addr(sbq_desc
,
1532 pci_unmap_len(sbq_desc
,
1534 PCI_DMA_FROMDEVICE
);
1535 sbq_desc
->p
.skb
= NULL
;
1537 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) {
1538 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1539 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1540 "Header in small, %d bytes in large. Chain large to small!\n", length
);
1542 * The data is in a single large buffer. We
1543 * chain it to the header buffer's skb and let
1546 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1547 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1548 "Chaining page at offset = %d,"
1549 "for %d bytes to skb.\n",
1550 lbq_desc
->p
.pg_chunk
.offset
, length
);
1551 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.pg_chunk
.page
,
1552 lbq_desc
->p
.pg_chunk
.offset
,
1555 skb
->data_len
+= length
;
1556 skb
->truesize
+= length
;
1559 * The headers and data are in a single large buffer. We
1560 * copy it to a new skb and let it go. This can happen with
1561 * jumbo mtu on a non-TCP/UDP frame.
1563 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1564 skb
= netdev_alloc_skb(qdev
->ndev
, length
);
1566 QPRINTK(qdev
, PROBE
, DEBUG
,
1567 "No skb available, drop the packet.\n");
1570 pci_unmap_page(qdev
->pdev
,
1571 pci_unmap_addr(lbq_desc
,
1573 pci_unmap_len(lbq_desc
, maplen
),
1574 PCI_DMA_FROMDEVICE
);
1575 skb_reserve(skb
, NET_IP_ALIGN
);
1576 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1577 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length
);
1578 skb_fill_page_desc(skb
, 0,
1579 lbq_desc
->p
.pg_chunk
.page
,
1580 lbq_desc
->p
.pg_chunk
.offset
,
1583 skb
->data_len
+= length
;
1584 skb
->truesize
+= length
;
1586 __pskb_pull_tail(skb
,
1587 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1588 VLAN_ETH_HLEN
: ETH_HLEN
);
1592 * The data is in a chain of large buffers
1593 * pointed to by a small buffer. We loop
1594 * thru and chain them to the our small header
1596 * frags: There are 18 max frags and our small
1597 * buffer will hold 32 of them. The thing is,
1598 * we'll use 3 max for our 9000 byte jumbo
1599 * frames. If the MTU goes up we could
1600 * eventually be in trouble.
1603 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1604 pci_unmap_single(qdev
->pdev
,
1605 pci_unmap_addr(sbq_desc
, mapaddr
),
1606 pci_unmap_len(sbq_desc
, maplen
),
1607 PCI_DMA_FROMDEVICE
);
1608 if (!(ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
)) {
1610 * This is an non TCP/UDP IP frame, so
1611 * the headers aren't split into a small
1612 * buffer. We have to use the small buffer
1613 * that contains our sg list as our skb to
1614 * send upstairs. Copy the sg list here to
1615 * a local buffer and use it to find the
1618 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1619 "%d bytes of headers & data in chain of large.\n", length
);
1620 skb
= sbq_desc
->p
.skb
;
1621 sbq_desc
->p
.skb
= NULL
;
1622 skb_reserve(skb
, NET_IP_ALIGN
);
1624 while (length
> 0) {
1625 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1626 size
= (length
< rx_ring
->lbq_buf_size
) ? length
:
1627 rx_ring
->lbq_buf_size
;
1629 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1630 "Adding page %d to skb for %d bytes.\n",
1632 skb_fill_page_desc(skb
, i
,
1633 lbq_desc
->p
.pg_chunk
.page
,
1634 lbq_desc
->p
.pg_chunk
.offset
,
1637 skb
->data_len
+= size
;
1638 skb
->truesize
+= size
;
1642 __pskb_pull_tail(skb
, (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1643 VLAN_ETH_HLEN
: ETH_HLEN
);
1648 /* Process an inbound completion from an rx ring. */
1649 static void ql_process_mac_rx_intr(struct ql_adapter
*qdev
,
1650 struct rx_ring
*rx_ring
,
1651 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1653 struct net_device
*ndev
= qdev
->ndev
;
1654 struct sk_buff
*skb
= NULL
;
1655 u16 vlan_id
= (le16_to_cpu(ib_mac_rsp
->vlan_id
) &
1656 IB_MAC_IOCB_RSP_VLAN_MASK
)
1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp
);
1660 skb
= ql_build_rx_skb(qdev
, rx_ring
, ib_mac_rsp
);
1661 if (unlikely(!skb
)) {
1662 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1663 "No skb available, drop packet.\n");
1667 /* Frame error, so drop the packet. */
1668 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1669 QPRINTK(qdev
, DRV
, ERR
, "Receive error, flags2 = 0x%x\n",
1670 ib_mac_rsp
->flags2
);
1671 dev_kfree_skb_any(skb
);
1675 /* The max framesize filter on this chip is set higher than
1676 * MTU since FCoE uses 2k frames.
1678 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1679 dev_kfree_skb_any(skb
);
1683 prefetch(skb
->data
);
1685 if (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) {
1686 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "%s%s%s Multicast.\n",
1687 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1688 IB_MAC_IOCB_RSP_M_HASH
? "Hash" : "",
1689 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1690 IB_MAC_IOCB_RSP_M_REG
? "Registered" : "",
1691 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1692 IB_MAC_IOCB_RSP_M_PROM
? "Promiscuous" : "");
1694 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_P
) {
1695 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Promiscuous Packet.\n");
1698 skb
->protocol
= eth_type_trans(skb
, ndev
);
1699 skb
->ip_summed
= CHECKSUM_NONE
;
1701 /* If rx checksum is on, and there are no
1702 * csum or frame errors.
1704 if (qdev
->rx_csum
&&
1705 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
1707 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
1708 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1709 "TCP checksum done!\n");
1710 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1711 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
1712 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
1713 /* Unfragmented ipv4 UDP frame. */
1714 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
1715 if (!(iph
->frag_off
&
1716 cpu_to_be16(IP_MF
|IP_OFFSET
))) {
1717 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1718 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1719 "TCP checksum done!\n");
1724 ndev
->stats
.rx_packets
++;
1725 ndev
->stats
.rx_bytes
+= skb
->len
;
1726 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
1727 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
) {
1729 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) &&
1731 vlan_gro_receive(&rx_ring
->napi
, qdev
->vlgrp
,
1734 napi_gro_receive(&rx_ring
->napi
, skb
);
1737 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) &&
1739 vlan_hwaccel_receive_skb(skb
, qdev
->vlgrp
, vlan_id
);
1741 netif_receive_skb(skb
);
1745 /* Process an outbound completion from an rx ring. */
/* Process an outbound (TX) completion from an rx ring: unmap the DMA
 * segments for the completed send, account the stats, free the skb, and
 * log any error flags the chip reported. Finally return the slot to the
 * tx ring's available count.
 */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	/* txq_idx/tid were stashed in the IOCB at send time and identify
	 * which ring and which descriptor completed.
	 */
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
	ndev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	/* NOTE(review): the middle members of this flag mask were missing
	 * from this copy; OB_MAC_IOCB_RSP_S and OB_MAC_IOCB_RSP_L are
	 * restored from the upstream driver — confirm.
	 */
	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too short to be legal, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"PCI backplane error. Frame not sent.\n");
		}
	}
	/* One more slot is free on the tx work queue. */
	atomic_inc(&tx_ring->tx_count);
}
1786 /* Fire up a handler to reset the MPI processor. */
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	/* NOTE(review): one statement (presumably taking the link down,
	 * e.g. ql_link_off(qdev)) appears to be missing from this copy
	 * before the work is queued — confirm against upstream.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
/* Mask interrupts, mark the adapter as down so the recovery logic does
 * not kill its own worker, and schedule the ASIC reset worker.
 */
void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
/* Dispatch an asynchronous-event IOCB from the chip. Management processor
 * faults queue a firmware reset; all other (unexpected or fatal) events
 * queue an ASIC reset.
 */
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	/* NOTE(review): the case terminators (return/break) and the q_id
	 * argument of the PCI-error message were missing from this copy
	 * and are restored from the upstream driver — confirm.
	 */
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR,
			"Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		QPRINTK(qdev, LINK, ERR,
			"Multiple CAM hits lookup occurred.\n");
		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		QPRINTK(qdev, RX_ERR, ERR,
			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
			ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}
1842 static int ql_clean_outbound_rx_ring(struct rx_ring
*rx_ring
)
1844 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1845 u32 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1846 struct ob_mac_iocb_rsp
*net_rsp
= NULL
;
1849 struct tx_ring
*tx_ring
;
1850 /* While there are entries in the completion queue. */
1851 while (prod
!= rx_ring
->cnsmr_idx
) {
1853 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1854 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring
->cq_id
,
1855 prod
, rx_ring
->cnsmr_idx
);
1857 net_rsp
= (struct ob_mac_iocb_rsp
*)rx_ring
->curr_entry
;
1859 switch (net_rsp
->opcode
) {
1861 case OPCODE_OB_MAC_TSO_IOCB
:
1862 case OPCODE_OB_MAC_IOCB
:
1863 ql_process_mac_tx_intr(qdev
, net_rsp
);
1866 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1867 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1871 ql_update_cq(rx_ring
);
1872 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1874 ql_write_cq_idx(rx_ring
);
1875 tx_ring
= &qdev
->tx_ring
[net_rsp
->txq_idx
];
1876 if (__netif_subqueue_stopped(qdev
->ndev
, tx_ring
->wq_id
) &&
1878 if (atomic_read(&tx_ring
->queue_stopped
) &&
1879 (atomic_read(&tx_ring
->tx_count
) > (tx_ring
->wq_len
/ 4)))
1881 * The queue got stopped because the tx_ring was full.
1882 * Wake it up, because it's now at least 25% empty.
1884 netif_wake_subqueue(qdev
->ndev
, tx_ring
->wq_id
);
1890 static int ql_clean_inbound_rx_ring(struct rx_ring
*rx_ring
, int budget
)
1892 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1893 u32 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1894 struct ql_net_rsp_iocb
*net_rsp
;
1897 /* While there are entries in the completion queue. */
1898 while (prod
!= rx_ring
->cnsmr_idx
) {
1900 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1901 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring
->cq_id
,
1902 prod
, rx_ring
->cnsmr_idx
);
1904 net_rsp
= rx_ring
->curr_entry
;
1906 switch (net_rsp
->opcode
) {
1907 case OPCODE_IB_MAC_IOCB
:
1908 ql_process_mac_rx_intr(qdev
, rx_ring
,
1909 (struct ib_mac_iocb_rsp
*)
1913 case OPCODE_IB_AE_IOCB
:
1914 ql_process_chip_ae_intr(qdev
, (struct ib_ae_iocb_rsp
*)
1919 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1920 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1925 ql_update_cq(rx_ring
);
1926 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1927 if (count
== budget
)
1930 ql_update_buffer_queues(qdev
, rx_ring
);
1931 ql_write_cq_idx(rx_ring
);
1935 static int ql_napi_poll_msix(struct napi_struct
*napi
, int budget
)
1937 struct rx_ring
*rx_ring
= container_of(napi
, struct rx_ring
, napi
);
1938 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1939 struct rx_ring
*trx_ring
;
1940 int i
, work_done
= 0;
1941 struct intr_context
*ctx
= &qdev
->intr_context
[rx_ring
->cq_id
];
1943 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Enter, NAPI POLL cq_id = %d.\n",
1946 /* Service the TX rings first. They start
1947 * right after the RSS rings. */
1948 for (i
= qdev
->rss_ring_count
; i
< qdev
->rx_ring_count
; i
++) {
1949 trx_ring
= &qdev
->rx_ring
[i
];
1950 /* If this TX completion ring belongs to this vector and
1951 * it's not empty then service it.
1953 if ((ctx
->irq_mask
& (1 << trx_ring
->cq_id
)) &&
1954 (ql_read_sh_reg(trx_ring
->prod_idx_sh_reg
) !=
1955 trx_ring
->cnsmr_idx
)) {
1956 QPRINTK(qdev
, INTR
, DEBUG
,
1957 "%s: Servicing TX completion ring %d.\n",
1958 __func__
, trx_ring
->cq_id
);
1959 ql_clean_outbound_rx_ring(trx_ring
);
1964 * Now service the RSS ring if it's active.
1966 if (ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
) !=
1967 rx_ring
->cnsmr_idx
) {
1968 QPRINTK(qdev
, INTR
, DEBUG
,
1969 "%s: Servicing RX completion ring %d.\n",
1970 __func__
, rx_ring
->cq_id
);
1971 work_done
= ql_clean_inbound_rx_ring(rx_ring
, budget
);
1974 if (work_done
< budget
) {
1975 napi_complete(napi
);
1976 ql_enable_completion_interrupt(qdev
, rx_ring
->irq
);
/* vlan_rx_register hook: remember the VLAN group and enable or disable
 * VLAN filtering in the NIC receive configuration register accordingly.
 */
static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* NOTE(review): the group assignment and the branch condition were
	 * missing from this copy and are restored from the upstream
	 * driver (qdev->vlgrp = grp; branch on grp != NULL) — confirm.
	 */
	qdev->vlgrp = grp;
	if (grp) {
		QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		QPRINTK(qdev, IFUP, DEBUG,
			"Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}
/* vlan_rx_add_vid hook: write an enabled VLAN entry for @vid into the
 * MAC address CAM, under the MAC-address hardware semaphore.
 */
static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	/* NOTE(review): the early-return on semaphore failure was missing
	 * from this copy and is restored from upstream — confirm.
	 */
	if (status)
		return;
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
/* vlan_rx_kill_vid hook: clear the CAM entry for @vid by writing it back
 * with the enable bit cleared, under the MAC-address hardware semaphore.
 */
static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	/* NOTE(review): the declaration of enable_bit (zero here, meaning
	 * "disabled") and the early-return on semaphore failure were
	 * missing from this copy and are restored from upstream — confirm.
	 */
	u32 enable_bit = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions.
 * Each RSS ring has a dedicated vector; just kick NAPI for that ring.
 */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	/* NOTE(review): return statement missing from this copy; restored
	 * as IRQ_HANDLED per upstream — confirm.
	 */
	return IRQ_HANDLED;
}
2039 /* This handles a fatal error, MPI activity, and the default
2040 * rx_ring in an MSI-X multiple vector environment.
2041 * In MSI/Legacy environment it also process the rest of
2044 static irqreturn_t
qlge_isr(int irq
, void *dev_id
)
2046 struct rx_ring
*rx_ring
= dev_id
;
2047 struct ql_adapter
*qdev
= rx_ring
->qdev
;
2048 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
2052 spin_lock(&qdev
->hw_lock
);
2053 if (atomic_read(&qdev
->intr_context
[0].irq_cnt
)) {
2054 QPRINTK(qdev
, INTR
, DEBUG
, "Shared Interrupt, Not ours!\n");
2055 spin_unlock(&qdev
->hw_lock
);
2058 spin_unlock(&qdev
->hw_lock
);
2060 var
= ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
2063 * Check for fatal error.
2066 ql_queue_asic_error(qdev
);
2067 QPRINTK(qdev
, INTR
, ERR
, "Got fatal error, STS = %x.\n", var
);
2068 var
= ql_read32(qdev
, ERR_STS
);
2069 QPRINTK(qdev
, INTR
, ERR
,
2070 "Resetting chip. Error Status Register = 0x%x\n", var
);
2075 * Check MPI processor activity.
2077 if ((var
& STS_PI
) &&
2078 (ql_read32(qdev
, INTR_MASK
) & INTR_MASK_PI
)) {
2080 * We've got an async event or mailbox completion.
2081 * Handle it and clear the source of the interrupt.
2083 QPRINTK(qdev
, INTR
, ERR
, "Got MPI processor interrupt.\n");
2084 ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
2085 ql_write32(qdev
, INTR_MASK
, (INTR_MASK_PI
<< 16));
2086 queue_delayed_work_on(smp_processor_id(),
2087 qdev
->workqueue
, &qdev
->mpi_work
, 0);
2092 * Get the bit-mask that shows the active queues for this
2093 * pass. Compare it to the queues that this irq services
2094 * and call napi if there's a match.
2096 var
= ql_read32(qdev
, ISR1
);
2097 if (var
& intr_context
->irq_mask
) {
2098 QPRINTK(qdev
, INTR
, INFO
,
2099 "Waking handler for rx_ring[0].\n");
2100 ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
2101 napi_schedule(&rx_ring
->napi
);
2104 ql_enable_completion_interrupt(qdev
, intr_context
->intr
);
2105 return work_done
? IRQ_HANDLED
: IRQ_NONE
;
2108 static int ql_tso(struct sk_buff
*skb
, struct ob_mac_tso_iocb_req
*mac_iocb_ptr
)
2111 if (skb_is_gso(skb
)) {
2113 if (skb_header_cloned(skb
)) {
2114 err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2119 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_TSO_IOCB
;
2120 mac_iocb_ptr
->flags3
|= OB_MAC_TSO_IOCB_IC
;
2121 mac_iocb_ptr
->frame_len
= cpu_to_le32((u32
) skb
->len
);
2122 mac_iocb_ptr
->total_hdrs_len
=
2123 cpu_to_le16(skb_transport_offset(skb
) + tcp_hdrlen(skb
));
2124 mac_iocb_ptr
->net_trans_offset
=
2125 cpu_to_le16(skb_network_offset(skb
) |
2126 skb_transport_offset(skb
)
2127 << OB_MAC_TRANSPORT_HDR_SHIFT
);
2128 mac_iocb_ptr
->mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
2129 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_LSO
;
2130 if (likely(skb
->protocol
== htons(ETH_P_IP
))) {
2131 struct iphdr
*iph
= ip_hdr(skb
);
2133 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP4
;
2134 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
2138 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
2139 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP6
;
2140 tcp_hdr(skb
)->check
=
2141 ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
2142 &ipv6_hdr(skb
)->daddr
,
/* Set up hardware TCP/UDP checksum offload for a non-TSO IPv4 frame:
 * fill in the TSO-style IOCB offsets and seed the transport checksum
 * field with the pseudo-header checksum the chip expects.
 */
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	/* NOTE(review): the local declarations (len, check) and the else
	 * branch header were missing from this copy and are restored from
	 * the upstream driver — confirm.
	 */
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	/* Network/transport header offsets packed into one field. */
	mac_iocb_ptr->net_trans_offset =
	    cpu_to_le16(skb_network_offset(skb) |
			skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	/* Transport payload length = total IP length minus IP header. */
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	/* Pseudo-header checksum; hardware folds in the payload. */
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
2181 static netdev_tx_t
qlge_send(struct sk_buff
*skb
, struct net_device
*ndev
)
2183 struct tx_ring_desc
*tx_ring_desc
;
2184 struct ob_mac_iocb_req
*mac_iocb_ptr
;
2185 struct ql_adapter
*qdev
= netdev_priv(ndev
);
2187 struct tx_ring
*tx_ring
;
2188 u32 tx_ring_idx
= (u32
) skb
->queue_mapping
;
2190 tx_ring
= &qdev
->tx_ring
[tx_ring_idx
];
2192 if (skb_padto(skb
, ETH_ZLEN
))
2193 return NETDEV_TX_OK
;
2195 if (unlikely(atomic_read(&tx_ring
->tx_count
) < 2)) {
2196 QPRINTK(qdev
, TX_QUEUED
, INFO
,
2197 "%s: shutting down tx queue %d du to lack of resources.\n",
2198 __func__
, tx_ring_idx
);
2199 netif_stop_subqueue(ndev
, tx_ring
->wq_id
);
2200 atomic_inc(&tx_ring
->queue_stopped
);
2201 return NETDEV_TX_BUSY
;
2203 tx_ring_desc
= &tx_ring
->q
[tx_ring
->prod_idx
];
2204 mac_iocb_ptr
= tx_ring_desc
->queue_entry
;
2205 memset((void *)mac_iocb_ptr
, 0, sizeof(*mac_iocb_ptr
));
2207 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_IOCB
;
2208 mac_iocb_ptr
->tid
= tx_ring_desc
->index
;
2209 /* We use the upper 32-bits to store the tx queue for this IO.
2210 * When we get the completion we can use it to establish the context.
2212 mac_iocb_ptr
->txq_idx
= tx_ring_idx
;
2213 tx_ring_desc
->skb
= skb
;
2215 mac_iocb_ptr
->frame_len
= cpu_to_le16((u16
) skb
->len
);
2217 if (qdev
->vlgrp
&& vlan_tx_tag_present(skb
)) {
2218 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "Adding a vlan tag %d.\n",
2219 vlan_tx_tag_get(skb
));
2220 mac_iocb_ptr
->flags3
|= OB_MAC_IOCB_V
;
2221 mac_iocb_ptr
->vlan_tci
= cpu_to_le16(vlan_tx_tag_get(skb
));
2223 tso
= ql_tso(skb
, (struct ob_mac_tso_iocb_req
*)mac_iocb_ptr
);
2225 dev_kfree_skb_any(skb
);
2226 return NETDEV_TX_OK
;
2227 } else if (unlikely(!tso
) && (skb
->ip_summed
== CHECKSUM_PARTIAL
)) {
2228 ql_hw_csum_setup(skb
,
2229 (struct ob_mac_tso_iocb_req
*)mac_iocb_ptr
);
2231 if (ql_map_send(qdev
, mac_iocb_ptr
, skb
, tx_ring_desc
) !=
2233 QPRINTK(qdev
, TX_QUEUED
, ERR
,
2234 "Could not map the segments.\n");
2235 return NETDEV_TX_BUSY
;
2237 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr
);
2238 tx_ring
->prod_idx
++;
2239 if (tx_ring
->prod_idx
== tx_ring
->wq_len
)
2240 tx_ring
->prod_idx
= 0;
2243 ql_write_db_reg(tx_ring
->prod_idx
, tx_ring
->prod_idx_db_reg
);
2244 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "tx queued, slot %d, len %d\n",
2245 tx_ring
->prod_idx
, skb
->len
);
2247 atomic_dec(&tx_ring
->tx_count
);
2248 return NETDEV_TX_OK
;
/* Release the coherent DMA pages that hold the rx and tx shadow
 * registers, if they were allocated.
 */
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	/* NOTE(review): the size argument of both frees was missing from
	 * this copy; PAGE_SIZE is restored to match ql_alloc_shadow_space
	 * — confirm.
	 */
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}
/* Allocate one coherent DMA page each for the rx and tx shadow register
 * areas and zero them. On failure the rx page is freed before returning.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of RX shadow space failed.\n");
		/* NOTE(review): return value missing from this copy;
		 * restored as -ENOMEM per upstream — confirm.
		 */
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	/* Unwind the rx allocation on tx allocation failure. */
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
/* Initialize the software descriptor array for a tx ring: point each
 * tx_ring_desc at its IOCB slot in the work queue and reset the counts.
 */
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		/* NOTE(review): the pointer advances below were missing
		 * from this copy and are restored from upstream — confirm.
		 */
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	/* Whole ring is free; queue not flow-controlled. */
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}
/* Free the DMA work queue and the software descriptor array of a tx ring. */
static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	/* NOTE(review): the kfree of the software descriptor array was
	 * missing from this copy and is restored from upstream — confirm.
	 */
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}
2330 static int ql_alloc_tx_resources(struct ql_adapter
*qdev
,
2331 struct tx_ring
*tx_ring
)
2334 pci_alloc_consistent(qdev
->pdev
, tx_ring
->wq_size
,
2335 &tx_ring
->wq_base_dma
);
2337 if ((tx_ring
->wq_base
== NULL
)
2338 || tx_ring
->wq_base_dma
& WQ_ADDR_ALIGN
) {
2339 QPRINTK(qdev
, IFUP
, ERR
, "tx_ring alloc failed.\n");
2343 kmalloc(tx_ring
->wq_len
* sizeof(struct tx_ring_desc
), GFP_KERNEL
);
2344 if (tx_ring
->q
== NULL
)
2349 pci_free_consistent(qdev
->pdev
, tx_ring
->wq_size
,
2350 tx_ring
->wq_base
, tx_ring
->wq_base_dma
);
/* Walk the outstanding large-buffer descriptors (from curr to clean index)
 * releasing the page-chunk references; unmap the backing block only on
 * the descriptor that owns the last chunk of a mapped block.
 */
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			/* This chunk is the last one carved from its block;
			 * the whole block mapping is torn down here.
			 */
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		/* NOTE(review): the index wrap assignment was missing from
		 * this copy and is restored from upstream — confirm.
		 */
		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
}
/* Unmap and free every skb still attached to the small buffer queue. */
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
			/* NOTE(review): loop exit after this error was
			 * missing from this copy; restored as return per
			 * upstream — confirm.
			 */
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc, mapaddr),
					 pci_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}
/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* NOTE(review): the null-guards on lbq/sbq were missing
		 * from this copy and are restored from upstream — confirm.
		 */
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}
/* Prime the buffer queues of every completion ring that receives data
 * (TX-completion-only rings have no buffer queues to fill).
 */
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}
/* Initialize the software large-buffer descriptors: zero them, number
 * them, and point each at its hardware address slot in lbq_base.
 */
static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		/* NOTE(review): the bq++ advance was missing from this
		 * copy and is restored from upstream — confirm.
		 */
		bq++;
	}
}
/* Initialize the software small-buffer descriptors: zero them, number
 * them, and point each at its hardware address slot in sbq_base.
 */
static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		/* NOTE(review): the bq++ advance was missing from this
		 * copy and is restored from upstream — confirm.
		 */
		bq++;
	}
}
2467 static void ql_free_rx_resources(struct ql_adapter
*qdev
,
2468 struct rx_ring
*rx_ring
)
2470 /* Free the small buffer queue. */
2471 if (rx_ring
->sbq_base
) {
2472 pci_free_consistent(qdev
->pdev
,
2474 rx_ring
->sbq_base
, rx_ring
->sbq_base_dma
);
2475 rx_ring
->sbq_base
= NULL
;
2478 /* Free the small buffer queue control blocks. */
2479 kfree(rx_ring
->sbq
);
2480 rx_ring
->sbq
= NULL
;
2482 /* Free the large buffer queue. */
2483 if (rx_ring
->lbq_base
) {
2484 pci_free_consistent(qdev
->pdev
,
2486 rx_ring
->lbq_base
, rx_ring
->lbq_base_dma
);
2487 rx_ring
->lbq_base
= NULL
;
2490 /* Free the large buffer queue control blocks. */
2491 kfree(rx_ring
->lbq
);
2492 rx_ring
->lbq
= NULL
;
2494 /* Free the rx queue. */
2495 if (rx_ring
->cq_base
) {
2496 pci_free_consistent(qdev
->pdev
,
2498 rx_ring
->cq_base
, rx_ring
->cq_base_dma
);
2499 rx_ring
->cq_base
= NULL
;
2503 /* Allocate queues and buffers for this completions queue based
2504 * on the values in the parameter structure. */
2505 static int ql_alloc_rx_resources(struct ql_adapter
*qdev
,
2506 struct rx_ring
*rx_ring
)
2510 * Allocate the completion queue for this rx_ring.
2513 pci_alloc_consistent(qdev
->pdev
, rx_ring
->cq_size
,
2514 &rx_ring
->cq_base_dma
);
2516 if (rx_ring
->cq_base
== NULL
) {
2517 QPRINTK(qdev
, IFUP
, ERR
, "rx_ring alloc failed.\n");
2521 if (rx_ring
->sbq_len
) {
2523 * Allocate small buffer queue.
2526 pci_alloc_consistent(qdev
->pdev
, rx_ring
->sbq_size
,
2527 &rx_ring
->sbq_base_dma
);
2529 if (rx_ring
->sbq_base
== NULL
) {
2530 QPRINTK(qdev
, IFUP
, ERR
,
2531 "Small buffer queue allocation failed.\n");
2536 * Allocate small buffer queue control blocks.
2539 kmalloc(rx_ring
->sbq_len
* sizeof(struct bq_desc
),
2541 if (rx_ring
->sbq
== NULL
) {
2542 QPRINTK(qdev
, IFUP
, ERR
,
2543 "Small buffer queue control block allocation failed.\n");
2547 ql_init_sbq_ring(qdev
, rx_ring
);
2550 if (rx_ring
->lbq_len
) {
2552 * Allocate large buffer queue.
2555 pci_alloc_consistent(qdev
->pdev
, rx_ring
->lbq_size
,
2556 &rx_ring
->lbq_base_dma
);
2558 if (rx_ring
->lbq_base
== NULL
) {
2559 QPRINTK(qdev
, IFUP
, ERR
,
2560 "Large buffer queue allocation failed.\n");
2564 * Allocate large buffer queue control blocks.
2567 kmalloc(rx_ring
->lbq_len
* sizeof(struct bq_desc
),
2569 if (rx_ring
->lbq
== NULL
) {
2570 QPRINTK(qdev
, IFUP
, ERR
,
2571 "Large buffer queue control block allocation failed.\n");
2575 ql_init_lbq_ring(qdev
, rx_ring
);
2581 ql_free_rx_resources(qdev
, rx_ring
);
2585 static void ql_tx_ring_clean(struct ql_adapter
*qdev
)
2587 struct tx_ring
*tx_ring
;
2588 struct tx_ring_desc
*tx_ring_desc
;
2592 * Loop through all queues and free
2595 for (j
= 0; j
< qdev
->tx_ring_count
; j
++) {
2596 tx_ring
= &qdev
->tx_ring
[j
];
2597 for (i
= 0; i
< tx_ring
->wq_len
; i
++) {
2598 tx_ring_desc
= &tx_ring
->q
[i
];
2599 if (tx_ring_desc
&& tx_ring_desc
->skb
) {
2600 QPRINTK(qdev
, IFDOWN
, ERR
,
2601 "Freeing lost SKB %p, from queue %d, index %d.\n",
2602 tx_ring_desc
->skb
, j
,
2603 tx_ring_desc
->index
);
2604 ql_unmap_send(qdev
, tx_ring_desc
,
2605 tx_ring_desc
->map_cnt
);
2606 dev_kfree_skb(tx_ring_desc
->skb
);
2607 tx_ring_desc
->skb
= NULL
;
2613 static void ql_free_mem_resources(struct ql_adapter
*qdev
)
2617 for (i
= 0; i
< qdev
->tx_ring_count
; i
++)
2618 ql_free_tx_resources(qdev
, &qdev
->tx_ring
[i
]);
2619 for (i
= 0; i
< qdev
->rx_ring_count
; i
++)
2620 ql_free_rx_resources(qdev
, &qdev
->rx_ring
[i
]);
2621 ql_free_shadow_space(qdev
);
2624 static int ql_alloc_mem_resources(struct ql_adapter
*qdev
)
2628 /* Allocate space for our shadow registers and such. */
2629 if (ql_alloc_shadow_space(qdev
))
2632 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
2633 if (ql_alloc_rx_resources(qdev
, &qdev
->rx_ring
[i
]) != 0) {
2634 QPRINTK(qdev
, IFUP
, ERR
,
2635 "RX resource allocation failed.\n");
2639 /* Allocate tx queue resources */
2640 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
2641 if (ql_alloc_tx_resources(qdev
, &qdev
->tx_ring
[i
]) != 0) {
2642 QPRINTK(qdev
, IFUP
, ERR
,
2643 "TX resource allocation failed.\n");
2650 ql_free_mem_resources(qdev
);
2654 /* Set up the rx ring control block and pass it to the chip.
2655 * The control block is defined as
2656 * "Completion Queue Initialization Control Block", or cqicb.
2658 static int ql_start_rx_ring(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
2660 struct cqicb
*cqicb
= &rx_ring
->cqicb
;
2661 void *shadow_reg
= qdev
->rx_ring_shadow_reg_area
+
2662 (rx_ring
->cq_id
* RX_RING_SHADOW_SPACE
);
2663 u64 shadow_reg_dma
= qdev
->rx_ring_shadow_reg_dma
+
2664 (rx_ring
->cq_id
* RX_RING_SHADOW_SPACE
);
2665 void __iomem
*doorbell_area
=
2666 qdev
->doorbell_area
+ (DB_PAGE_SIZE
* (128 + rx_ring
->cq_id
));
2670 __le64
*base_indirect_ptr
;
2673 /* Set up the shadow registers for this ring. */
2674 rx_ring
->prod_idx_sh_reg
= shadow_reg
;
2675 rx_ring
->prod_idx_sh_reg_dma
= shadow_reg_dma
;
2676 *rx_ring
->prod_idx_sh_reg
= 0;
2677 shadow_reg
+= sizeof(u64
);
2678 shadow_reg_dma
+= sizeof(u64
);
2679 rx_ring
->lbq_base_indirect
= shadow_reg
;
2680 rx_ring
->lbq_base_indirect_dma
= shadow_reg_dma
;
2681 shadow_reg
+= (sizeof(u64
) * MAX_DB_PAGES_PER_BQ(rx_ring
->lbq_len
));
2682 shadow_reg_dma
+= (sizeof(u64
) * MAX_DB_PAGES_PER_BQ(rx_ring
->lbq_len
));
2683 rx_ring
->sbq_base_indirect
= shadow_reg
;
2684 rx_ring
->sbq_base_indirect_dma
= shadow_reg_dma
;
2686 /* PCI doorbell mem area + 0x00 for consumer index register */
2687 rx_ring
->cnsmr_idx_db_reg
= (u32 __iomem
*) doorbell_area
;
2688 rx_ring
->cnsmr_idx
= 0;
2689 rx_ring
->curr_entry
= rx_ring
->cq_base
;
2691 /* PCI doorbell mem area + 0x04 for valid register */
2692 rx_ring
->valid_db_reg
= doorbell_area
+ 0x04;
2694 /* PCI doorbell mem area + 0x18 for large buffer consumer */
2695 rx_ring
->lbq_prod_idx_db_reg
= (u32 __iomem
*) (doorbell_area
+ 0x18);
2697 /* PCI doorbell mem area + 0x1c */
2698 rx_ring
->sbq_prod_idx_db_reg
= (u32 __iomem
*) (doorbell_area
+ 0x1c);
2700 memset((void *)cqicb
, 0, sizeof(struct cqicb
));
2701 cqicb
->msix_vect
= rx_ring
->irq
;
2703 bq_len
= (rx_ring
->cq_len
== 65536) ? 0 : (u16
) rx_ring
->cq_len
;
2704 cqicb
->len
= cpu_to_le16(bq_len
| LEN_V
| LEN_CPP_CONT
);
2706 cqicb
->addr
= cpu_to_le64(rx_ring
->cq_base_dma
);
2708 cqicb
->prod_idx_addr
= cpu_to_le64(rx_ring
->prod_idx_sh_reg_dma
);
2711 * Set up the control block load flags.
2713 cqicb
->flags
= FLAGS_LC
| /* Load queue base address */
2714 FLAGS_LV
| /* Load MSI-X vector */
2715 FLAGS_LI
; /* Load irq delay values */
2716 if (rx_ring
->lbq_len
) {
2717 cqicb
->flags
|= FLAGS_LL
; /* Load lbq values */
2718 tmp
= (u64
)rx_ring
->lbq_base_dma
;
2719 base_indirect_ptr
= (__le64
*) rx_ring
->lbq_base_indirect
;
2722 *base_indirect_ptr
= cpu_to_le64(tmp
);
2723 tmp
+= DB_PAGE_SIZE
;
2724 base_indirect_ptr
++;
2726 } while (page_entries
< MAX_DB_PAGES_PER_BQ(rx_ring
->lbq_len
));
2728 cpu_to_le64(rx_ring
->lbq_base_indirect_dma
);
2729 bq_len
= (rx_ring
->lbq_buf_size
== 65536) ? 0 :
2730 (u16
) rx_ring
->lbq_buf_size
;
2731 cqicb
->lbq_buf_size
= cpu_to_le16(bq_len
);
2732 bq_len
= (rx_ring
->lbq_len
== 65536) ? 0 :
2733 (u16
) rx_ring
->lbq_len
;
2734 cqicb
->lbq_len
= cpu_to_le16(bq_len
);
2735 rx_ring
->lbq_prod_idx
= 0;
2736 rx_ring
->lbq_curr_idx
= 0;
2737 rx_ring
->lbq_clean_idx
= 0;
2738 rx_ring
->lbq_free_cnt
= rx_ring
->lbq_len
;
2740 if (rx_ring
->sbq_len
) {
2741 cqicb
->flags
|= FLAGS_LS
; /* Load sbq values */
2742 tmp
= (u64
)rx_ring
->sbq_base_dma
;
2743 base_indirect_ptr
= (__le64
*) rx_ring
->sbq_base_indirect
;
2746 *base_indirect_ptr
= cpu_to_le64(tmp
);
2747 tmp
+= DB_PAGE_SIZE
;
2748 base_indirect_ptr
++;
2750 } while (page_entries
< MAX_DB_PAGES_PER_BQ(rx_ring
->sbq_len
));
2752 cpu_to_le64(rx_ring
->sbq_base_indirect_dma
);
2753 cqicb
->sbq_buf_size
=
2754 cpu_to_le16((u16
)(rx_ring
->sbq_buf_size
));
2755 bq_len
= (rx_ring
->sbq_len
== 65536) ? 0 :
2756 (u16
) rx_ring
->sbq_len
;
2757 cqicb
->sbq_len
= cpu_to_le16(bq_len
);
2758 rx_ring
->sbq_prod_idx
= 0;
2759 rx_ring
->sbq_curr_idx
= 0;
2760 rx_ring
->sbq_clean_idx
= 0;
2761 rx_ring
->sbq_free_cnt
= rx_ring
->sbq_len
;
2763 switch (rx_ring
->type
) {
2765 cqicb
->irq_delay
= cpu_to_le16(qdev
->tx_coalesce_usecs
);
2766 cqicb
->pkt_delay
= cpu_to_le16(qdev
->tx_max_coalesced_frames
);
2769 /* Inbound completion handling rx_rings run in
2770 * separate NAPI contexts.
2772 netif_napi_add(qdev
->ndev
, &rx_ring
->napi
, ql_napi_poll_msix
,
2774 cqicb
->irq_delay
= cpu_to_le16(qdev
->rx_coalesce_usecs
);
2775 cqicb
->pkt_delay
= cpu_to_le16(qdev
->rx_max_coalesced_frames
);
2778 QPRINTK(qdev
, IFUP
, DEBUG
, "Invalid rx_ring->type = %d.\n",
2781 QPRINTK(qdev
, IFUP
, DEBUG
, "Initializing rx work queue.\n");
2782 err
= ql_write_cfg(qdev
, cqicb
, sizeof(struct cqicb
),
2783 CFG_LCQ
, rx_ring
->cq_id
);
2785 QPRINTK(qdev
, IFUP
, ERR
, "Failed to load CQICB.\n");
2791 static int ql_start_tx_ring(struct ql_adapter
*qdev
, struct tx_ring
*tx_ring
)
2793 struct wqicb
*wqicb
= (struct wqicb
*)tx_ring
;
2794 void __iomem
*doorbell_area
=
2795 qdev
->doorbell_area
+ (DB_PAGE_SIZE
* tx_ring
->wq_id
);
2796 void *shadow_reg
= qdev
->tx_ring_shadow_reg_area
+
2797 (tx_ring
->wq_id
* sizeof(u64
));
2798 u64 shadow_reg_dma
= qdev
->tx_ring_shadow_reg_dma
+
2799 (tx_ring
->wq_id
* sizeof(u64
));
2803 * Assign doorbell registers for this tx_ring.
2805 /* TX PCI doorbell mem area for tx producer index */
2806 tx_ring
->prod_idx_db_reg
= (u32 __iomem
*) doorbell_area
;
2807 tx_ring
->prod_idx
= 0;
2808 /* TX PCI doorbell mem area + 0x04 */
2809 tx_ring
->valid_db_reg
= doorbell_area
+ 0x04;
2812 * Assign shadow registers for this tx_ring.
2814 tx_ring
->cnsmr_idx_sh_reg
= shadow_reg
;
2815 tx_ring
->cnsmr_idx_sh_reg_dma
= shadow_reg_dma
;
2817 wqicb
->len
= cpu_to_le16(tx_ring
->wq_len
| Q_LEN_V
| Q_LEN_CPP_CONT
);
2818 wqicb
->flags
= cpu_to_le16(Q_FLAGS_LC
|
2819 Q_FLAGS_LB
| Q_FLAGS_LI
| Q_FLAGS_LO
);
2820 wqicb
->cq_id_rss
= cpu_to_le16(tx_ring
->cq_id
);
2822 wqicb
->addr
= cpu_to_le64(tx_ring
->wq_base_dma
);
2824 wqicb
->cnsmr_idx_addr
= cpu_to_le64(tx_ring
->cnsmr_idx_sh_reg_dma
);
2826 ql_init_tx_ring(qdev
, tx_ring
);
2828 err
= ql_write_cfg(qdev
, wqicb
, sizeof(*wqicb
), CFG_LRQ
,
2829 (u16
) tx_ring
->wq_id
);
2831 QPRINTK(qdev
, IFUP
, ERR
, "Failed to load tx_ring.\n");
2834 QPRINTK(qdev
, IFUP
, DEBUG
, "Successfully loaded WQICB.\n");
2838 static void ql_disable_msix(struct ql_adapter
*qdev
)
2840 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
2841 pci_disable_msix(qdev
->pdev
);
2842 clear_bit(QL_MSIX_ENABLED
, &qdev
->flags
);
2843 kfree(qdev
->msi_x_entry
);
2844 qdev
->msi_x_entry
= NULL
;
2845 } else if (test_bit(QL_MSI_ENABLED
, &qdev
->flags
)) {
2846 pci_disable_msi(qdev
->pdev
);
2847 clear_bit(QL_MSI_ENABLED
, &qdev
->flags
);
2851 /* We start by trying to get the number of vectors
2852 * stored in qdev->intr_count. If we don't get that
2853 * many then we reduce the count and try again.
2855 static void ql_enable_msix(struct ql_adapter
*qdev
)
2859 /* Get the MSIX vectors. */
2860 if (irq_type
== MSIX_IRQ
) {
2861 /* Try to alloc space for the msix struct,
2862 * if it fails then go to MSI/legacy.
2864 qdev
->msi_x_entry
= kcalloc(qdev
->intr_count
,
2865 sizeof(struct msix_entry
),
2867 if (!qdev
->msi_x_entry
) {
2872 for (i
= 0; i
< qdev
->intr_count
; i
++)
2873 qdev
->msi_x_entry
[i
].entry
= i
;
2875 /* Loop to get our vectors. We start with
2876 * what we want and settle for what we get.
2879 err
= pci_enable_msix(qdev
->pdev
,
2880 qdev
->msi_x_entry
, qdev
->intr_count
);
2882 qdev
->intr_count
= err
;
2886 kfree(qdev
->msi_x_entry
);
2887 qdev
->msi_x_entry
= NULL
;
2888 QPRINTK(qdev
, IFUP
, WARNING
,
2889 "MSI-X Enable failed, trying MSI.\n");
2890 qdev
->intr_count
= 1;
2892 } else if (err
== 0) {
2893 set_bit(QL_MSIX_ENABLED
, &qdev
->flags
);
2894 QPRINTK(qdev
, IFUP
, INFO
,
2895 "MSI-X Enabled, got %d vectors.\n",
2901 qdev
->intr_count
= 1;
2902 if (irq_type
== MSI_IRQ
) {
2903 if (!pci_enable_msi(qdev
->pdev
)) {
2904 set_bit(QL_MSI_ENABLED
, &qdev
->flags
);
2905 QPRINTK(qdev
, IFUP
, INFO
,
2906 "Running with MSI interrupts.\n");
2911 QPRINTK(qdev
, IFUP
, DEBUG
, "Running with legacy interrupts.\n");
2914 /* Each vector services 1 RSS ring and and 1 or more
2915 * TX completion rings. This function loops through
2916 * the TX completion rings and assigns the vector that
2917 * will service it. An example would be if there are
2918 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2919 * This would mean that vector 0 would service RSS ring 0
2920 * and TX competion rings 0,1,2 and 3. Vector 1 would
2921 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2923 static void ql_set_tx_vect(struct ql_adapter
*qdev
)
2926 u32 tx_rings_per_vector
= qdev
->tx_ring_count
/ qdev
->intr_count
;
2928 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
2929 /* Assign irq vectors to TX rx_rings.*/
2930 for (vect
= 0, j
= 0, i
= qdev
->rss_ring_count
;
2931 i
< qdev
->rx_ring_count
; i
++) {
2932 if (j
== tx_rings_per_vector
) {
2936 qdev
->rx_ring
[i
].irq
= vect
;
2940 /* For single vector all rings have an irq
2943 for (i
= 0; i
< qdev
->rx_ring_count
; i
++)
2944 qdev
->rx_ring
[i
].irq
= 0;
2948 /* Set the interrupt mask for this vector. Each vector
2949 * will service 1 RSS ring and 1 or more TX completion
2950 * rings. This function sets up a bit mask per vector
2951 * that indicates which rings it services.
2953 static void ql_set_irq_mask(struct ql_adapter
*qdev
, struct intr_context
*ctx
)
2955 int j
, vect
= ctx
->intr
;
2956 u32 tx_rings_per_vector
= qdev
->tx_ring_count
/ qdev
->intr_count
;
2958 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
2959 /* Add the RSS ring serviced by this vector
2962 ctx
->irq_mask
= (1 << qdev
->rx_ring
[vect
].cq_id
);
2963 /* Add the TX ring(s) serviced by this vector
2965 for (j
= 0; j
< tx_rings_per_vector
; j
++) {
2967 (1 << qdev
->rx_ring
[qdev
->rss_ring_count
+
2968 (vect
* tx_rings_per_vector
) + j
].cq_id
);
2971 /* For single vector we just shift each queue's
2974 for (j
= 0; j
< qdev
->rx_ring_count
; j
++)
2975 ctx
->irq_mask
|= (1 << qdev
->rx_ring
[j
].cq_id
);
2980 * Here we build the intr_context structures based on
2981 * our rx_ring count and intr vector count.
2982 * The intr_context structure is used to hook each vector
2983 * to possibly different handlers.
2985 static void ql_resolve_queues_to_irqs(struct ql_adapter
*qdev
)
2988 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
2990 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
2991 /* Each rx_ring has it's
2992 * own intr_context since we have separate
2993 * vectors for each queue.
2995 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
2996 qdev
->rx_ring
[i
].irq
= i
;
2997 intr_context
->intr
= i
;
2998 intr_context
->qdev
= qdev
;
2999 /* Set up this vector's bit-mask that indicates
3000 * which queues it services.
3002 ql_set_irq_mask(qdev
, intr_context
);
3004 * We set up each vectors enable/disable/read bits so
3005 * there's no bit/mask calculations in the critical path.
3007 intr_context
->intr_en_mask
=
3008 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3009 INTR_EN_TYPE_ENABLE
| INTR_EN_IHD_MASK
| INTR_EN_IHD
3011 intr_context
->intr_dis_mask
=
3012 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3013 INTR_EN_TYPE_DISABLE
| INTR_EN_IHD_MASK
|
3015 intr_context
->intr_read_mask
=
3016 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3017 INTR_EN_TYPE_READ
| INTR_EN_IHD_MASK
| INTR_EN_IHD
|
3020 /* The first vector/queue handles
3021 * broadcast/multicast, fatal errors,
3022 * and firmware events. This in addition
3023 * to normal inbound NAPI processing.
3025 intr_context
->handler
= qlge_isr
;
3026 sprintf(intr_context
->name
, "%s-rx-%d",
3027 qdev
->ndev
->name
, i
);
3030 * Inbound queues handle unicast frames only.
3032 intr_context
->handler
= qlge_msix_rx_isr
;
3033 sprintf(intr_context
->name
, "%s-rx-%d",
3034 qdev
->ndev
->name
, i
);
3039 * All rx_rings use the same intr_context since
3040 * there is only one vector.
3042 intr_context
->intr
= 0;
3043 intr_context
->qdev
= qdev
;
3045 * We set up each vectors enable/disable/read bits so
3046 * there's no bit/mask calculations in the critical path.
3048 intr_context
->intr_en_mask
=
3049 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
| INTR_EN_TYPE_ENABLE
;
3050 intr_context
->intr_dis_mask
=
3051 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3052 INTR_EN_TYPE_DISABLE
;
3053 intr_context
->intr_read_mask
=
3054 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
| INTR_EN_TYPE_READ
;
3056 * Single interrupt means one handler for all rings.
3058 intr_context
->handler
= qlge_isr
;
3059 sprintf(intr_context
->name
, "%s-single_irq", qdev
->ndev
->name
);
3060 /* Set up this vector's bit-mask that indicates
3061 * which queues it services. In this case there is
3062 * a single vector so it will service all RSS and
3063 * TX completion rings.
3065 ql_set_irq_mask(qdev
, intr_context
);
3067 /* Tell the TX completion rings which MSIx vector
3068 * they will be using.
3070 ql_set_tx_vect(qdev
);
3073 static void ql_free_irq(struct ql_adapter
*qdev
)
3076 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
3078 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
3079 if (intr_context
->hooked
) {
3080 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
3081 free_irq(qdev
->msi_x_entry
[i
].vector
,
3083 QPRINTK(qdev
, IFDOWN
, DEBUG
,
3084 "freeing msix interrupt %d.\n", i
);
3086 free_irq(qdev
->pdev
->irq
, &qdev
->rx_ring
[0]);
3087 QPRINTK(qdev
, IFDOWN
, DEBUG
,
3088 "freeing msi interrupt %d.\n", i
);
3092 ql_disable_msix(qdev
);
3095 static int ql_request_irq(struct ql_adapter
*qdev
)
3099 struct pci_dev
*pdev
= qdev
->pdev
;
3100 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
3102 ql_resolve_queues_to_irqs(qdev
);
3104 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
3105 atomic_set(&intr_context
->irq_cnt
, 0);
3106 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
3107 status
= request_irq(qdev
->msi_x_entry
[i
].vector
,
3108 intr_context
->handler
,
3113 QPRINTK(qdev
, IFUP
, ERR
,
3114 "Failed request for MSIX interrupt %d.\n",
3118 QPRINTK(qdev
, IFUP
, DEBUG
,
3119 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3121 qdev
->rx_ring
[i
].type
==
3122 DEFAULT_Q
? "DEFAULT_Q" : "",
3123 qdev
->rx_ring
[i
].type
==
3125 qdev
->rx_ring
[i
].type
==
3126 RX_Q
? "RX_Q" : "", intr_context
->name
);
3129 QPRINTK(qdev
, IFUP
, DEBUG
,
3130 "trying msi or legacy interrupts.\n");
3131 QPRINTK(qdev
, IFUP
, DEBUG
,
3132 "%s: irq = %d.\n", __func__
, pdev
->irq
);
3133 QPRINTK(qdev
, IFUP
, DEBUG
,
3134 "%s: context->name = %s.\n", __func__
,
3135 intr_context
->name
);
3136 QPRINTK(qdev
, IFUP
, DEBUG
,
3137 "%s: dev_id = 0x%p.\n", __func__
,
3140 request_irq(pdev
->irq
, qlge_isr
,
3141 test_bit(QL_MSI_ENABLED
,
3143 flags
) ? 0 : IRQF_SHARED
,
3144 intr_context
->name
, &qdev
->rx_ring
[0]);
3148 QPRINTK(qdev
, IFUP
, ERR
,
3149 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3151 qdev
->rx_ring
[0].type
==
3152 DEFAULT_Q
? "DEFAULT_Q" : "",
3153 qdev
->rx_ring
[0].type
== TX_Q
? "TX_Q" : "",
3154 qdev
->rx_ring
[0].type
== RX_Q
? "RX_Q" : "",
3155 intr_context
->name
);
3157 intr_context
->hooked
= 1;
3161 QPRINTK(qdev
, IFUP
, ERR
, "Failed to get the interrupts!!!/n");
3166 static int ql_start_rss(struct ql_adapter
*qdev
)
3168 u8 init_hash_seed
[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3169 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3170 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3171 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3172 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3173 0xbe, 0xac, 0x01, 0xfa};
3174 struct ricb
*ricb
= &qdev
->ricb
;
3177 u8
*hash_id
= (u8
*) ricb
->hash_cq_id
;
3179 memset((void *)ricb
, 0, sizeof(*ricb
));
3181 ricb
->base_cq
= RSS_L4K
;
3183 (RSS_L6K
| RSS_LI
| RSS_LB
| RSS_LM
| RSS_RT4
| RSS_RT6
);
3184 ricb
->mask
= cpu_to_le16((u16
)(0x3ff));
3187 * Fill out the Indirection Table.
3189 for (i
= 0; i
< 1024; i
++)
3190 hash_id
[i
] = (i
& (qdev
->rss_ring_count
- 1));
3192 memcpy((void *)&ricb
->ipv6_hash_key
[0], init_hash_seed
, 40);
3193 memcpy((void *)&ricb
->ipv4_hash_key
[0], init_hash_seed
, 16);
3195 QPRINTK(qdev
, IFUP
, DEBUG
, "Initializing RSS.\n");
3197 status
= ql_write_cfg(qdev
, ricb
, sizeof(*ricb
), CFG_LR
, 0);
3199 QPRINTK(qdev
, IFUP
, ERR
, "Failed to load RICB.\n");
3202 QPRINTK(qdev
, IFUP
, DEBUG
, "Successfully loaded RICB.\n");
3206 static int ql_clear_routing_entries(struct ql_adapter
*qdev
)
3210 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
3213 /* Clear all the entries in the routing table. */
3214 for (i
= 0; i
< 16; i
++) {
3215 status
= ql_set_routing_reg(qdev
, i
, 0, 0);
3217 QPRINTK(qdev
, IFUP
, ERR
,
3218 "Failed to init routing register for CAM "
3223 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
3227 /* Initialize the frame-to-queue routing. */
3228 static int ql_route_initialize(struct ql_adapter
*qdev
)
3232 /* Clear all the entries in the routing table. */
3233 status
= ql_clear_routing_entries(qdev
);
3237 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
3241 status
= ql_set_routing_reg(qdev
, RT_IDX_ALL_ERR_SLOT
, RT_IDX_ERR
, 1);
3243 QPRINTK(qdev
, IFUP
, ERR
,
3244 "Failed to init routing register for error packets.\n");
3247 status
= ql_set_routing_reg(qdev
, RT_IDX_BCAST_SLOT
, RT_IDX_BCAST
, 1);
3249 QPRINTK(qdev
, IFUP
, ERR
,
3250 "Failed to init routing register for broadcast packets.\n");
3253 /* If we have more than one inbound queue, then turn on RSS in the
3256 if (qdev
->rss_ring_count
> 1) {
3257 status
= ql_set_routing_reg(qdev
, RT_IDX_RSS_MATCH_SLOT
,
3258 RT_IDX_RSS_MATCH
, 1);
3260 QPRINTK(qdev
, IFUP
, ERR
,
3261 "Failed to init routing register for MATCH RSS packets.\n");
3266 status
= ql_set_routing_reg(qdev
, RT_IDX_CAM_HIT_SLOT
,
3269 QPRINTK(qdev
, IFUP
, ERR
,
3270 "Failed to init routing register for CAM packets.\n");
3272 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
3276 int ql_cam_route_initialize(struct ql_adapter
*qdev
)
3280 /* If check if the link is up and use to
3281 * determine if we are setting or clearing
3282 * the MAC address in the CAM.
3284 set
= ql_read32(qdev
, STS
);
3285 set
&= qdev
->port_link_up
;
3286 status
= ql_set_mac_addr(qdev
, set
);
3288 QPRINTK(qdev
, IFUP
, ERR
, "Failed to init mac address.\n");
3292 status
= ql_route_initialize(qdev
);
3294 QPRINTK(qdev
, IFUP
, ERR
, "Failed to init routing table.\n");
3299 static int ql_adapter_initialize(struct ql_adapter
*qdev
)
3306 * Set up the System register to halt on errors.
3308 value
= SYS_EFE
| SYS_FAE
;
3310 ql_write32(qdev
, SYS
, mask
| value
);
3312 /* Set the default queue, and VLAN behavior. */
3313 value
= NIC_RCV_CFG_DFQ
| NIC_RCV_CFG_RV
;
3314 mask
= NIC_RCV_CFG_DFQ_MASK
| (NIC_RCV_CFG_RV
<< 16);
3315 ql_write32(qdev
, NIC_RCV_CFG
, (mask
| value
));
3317 /* Set the MPI interrupt to enabled. */
3318 ql_write32(qdev
, INTR_MASK
, (INTR_MASK_PI
<< 16) | INTR_MASK_PI
);
3320 /* Enable the function, set pagesize, enable error checking. */
3321 value
= FSC_FE
| FSC_EPC_INBOUND
| FSC_EPC_OUTBOUND
|
3322 FSC_EC
| FSC_VM_PAGE_4K
| FSC_SH
;
3324 /* Set/clear header splitting. */
3325 mask
= FSC_VM_PAGESIZE_MASK
|
3326 FSC_DBL_MASK
| FSC_DBRST_MASK
| (value
<< 16);
3327 ql_write32(qdev
, FSC
, mask
| value
);
3329 ql_write32(qdev
, SPLT_HDR
, SPLT_HDR_EP
|
3330 min(SMALL_BUF_MAP_SIZE
, MAX_SPLIT_SIZE
));
3332 /* Set RX packet routing to use port/pci function on which the
3333 * packet arrived on in addition to usual frame routing.
3334 * This is helpful on bonding where both interfaces can have
3335 * the same MAC address.
3337 ql_write32(qdev
, RST_FO
, RST_FO_RR_MASK
| RST_FO_RR_RCV_FUNC_CQ
);
3339 /* Start up the rx queues. */
3340 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
3341 status
= ql_start_rx_ring(qdev
, &qdev
->rx_ring
[i
]);
3343 QPRINTK(qdev
, IFUP
, ERR
,
3344 "Failed to start rx ring[%d].\n", i
);
3349 /* If there is more than one inbound completion queue
3350 * then download a RICB to configure RSS.
3352 if (qdev
->rss_ring_count
> 1) {
3353 status
= ql_start_rss(qdev
);
3355 QPRINTK(qdev
, IFUP
, ERR
, "Failed to start RSS.\n");
3360 /* Start up the tx queues. */
3361 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
3362 status
= ql_start_tx_ring(qdev
, &qdev
->tx_ring
[i
]);
3364 QPRINTK(qdev
, IFUP
, ERR
,
3365 "Failed to start tx ring[%d].\n", i
);
3370 /* Initialize the port and set the max framesize. */
3371 status
= qdev
->nic_ops
->port_initialize(qdev
);
3373 QPRINTK(qdev
, IFUP
, ERR
, "Failed to start port.\n");
3375 /* Set up the MAC address and frame routing filter. */
3376 status
= ql_cam_route_initialize(qdev
);
3378 QPRINTK(qdev
, IFUP
, ERR
,
3379 "Failed to init CAM/Routing tables.\n");
3383 /* Start NAPI for the RSS queues. */
3384 for (i
= 0; i
< qdev
->rss_ring_count
; i
++) {
3385 QPRINTK(qdev
, IFUP
, DEBUG
, "Enabling NAPI for rx_ring[%d].\n",
3387 napi_enable(&qdev
->rx_ring
[i
].napi
);
3393 /* Issue soft reset to chip. */
3394 static int ql_adapter_reset(struct ql_adapter
*qdev
)
3398 unsigned long end_jiffies
;
3400 /* Clear all the entries in the routing table. */
3401 status
= ql_clear_routing_entries(qdev
);
3403 QPRINTK(qdev
, IFUP
, ERR
, "Failed to clear routing bits.\n");
3407 end_jiffies
= jiffies
+
3408 max((unsigned long)1, usecs_to_jiffies(30));
3410 /* Stop management traffic. */
3411 ql_mb_set_mgmnt_traffic_ctl(qdev
, MB_SET_MPI_TFK_STOP
);
3413 /* Wait for the NIC and MGMNT FIFOs to empty. */
3414 ql_wait_fifo_empty(qdev
);
3416 ql_write32(qdev
, RST_FO
, (RST_FO_FR
<< 16) | RST_FO_FR
);
3419 value
= ql_read32(qdev
, RST_FO
);
3420 if ((value
& RST_FO_FR
) == 0)
3423 } while (time_before(jiffies
, end_jiffies
));
3425 if (value
& RST_FO_FR
) {
3426 QPRINTK(qdev
, IFDOWN
, ERR
,
3427 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3428 status
= -ETIMEDOUT
;
3431 /* Resume management traffic. */
3432 ql_mb_set_mgmnt_traffic_ctl(qdev
, MB_SET_MPI_TFK_RESUME
);
3436 static void ql_display_dev_info(struct net_device
*ndev
)
3438 struct ql_adapter
*qdev
= (struct ql_adapter
*)netdev_priv(ndev
);
3440 QPRINTK(qdev
, PROBE
, INFO
,
3441 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3442 "XG Roll = %d, XG Rev = %d.\n",
3445 qdev
->chip_rev_id
& 0x0000000f,
3446 qdev
->chip_rev_id
>> 4 & 0x0000000f,
3447 qdev
->chip_rev_id
>> 8 & 0x0000000f,
3448 qdev
->chip_rev_id
>> 12 & 0x0000000f);
3449 QPRINTK(qdev
, PROBE
, INFO
, "MAC address %pM\n", ndev
->dev_addr
);
3452 static int ql_adapter_down(struct ql_adapter
*qdev
)
3458 /* Don't kill the reset worker thread if we
3459 * are in the process of recovery.
3461 if (test_bit(QL_ADAPTER_UP
, &qdev
->flags
))
3462 cancel_delayed_work_sync(&qdev
->asic_reset_work
);
3463 cancel_delayed_work_sync(&qdev
->mpi_reset_work
);
3464 cancel_delayed_work_sync(&qdev
->mpi_work
);
3465 cancel_delayed_work_sync(&qdev
->mpi_idc_work
);
3466 cancel_delayed_work_sync(&qdev
->mpi_port_cfg_work
);
3468 for (i
= 0; i
< qdev
->rss_ring_count
; i
++)
3469 napi_disable(&qdev
->rx_ring
[i
].napi
);
3471 clear_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3473 ql_disable_interrupts(qdev
);
3475 ql_tx_ring_clean(qdev
);
3477 /* Call netif_napi_del() from common point.
3479 for (i
= 0; i
< qdev
->rss_ring_count
; i
++)
3480 netif_napi_del(&qdev
->rx_ring
[i
].napi
);
3482 ql_free_rx_buffers(qdev
);
3484 status
= ql_adapter_reset(qdev
);
3486 QPRINTK(qdev
, IFDOWN
, ERR
, "reset(func #%d) FAILED!\n",
3491 static int ql_adapter_up(struct ql_adapter
*qdev
)
3495 err
= ql_adapter_initialize(qdev
);
3497 QPRINTK(qdev
, IFUP
, INFO
, "Unable to initialize adapter.\n");
3500 set_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3501 ql_alloc_rx_buffers(qdev
);
3502 /* If the port is initialized and the
3503 * link is up the turn on the carrier.
3505 if ((ql_read32(qdev
, STS
) & qdev
->port_init
) &&
3506 (ql_read32(qdev
, STS
) & qdev
->port_link_up
))
3508 ql_enable_interrupts(qdev
);
3509 ql_enable_all_completion_interrupts(qdev
);
3510 netif_tx_start_all_queues(qdev
->ndev
);
3514 ql_adapter_reset(qdev
);
/* Counterpart of ql_get_adapter_resources(): free ring memory and
 * release the hooked interrupts.
 */
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}
3524 static int ql_get_adapter_resources(struct ql_adapter
*qdev
)
3528 if (ql_alloc_mem_resources(qdev
)) {
3529 QPRINTK(qdev
, IFUP
, ERR
, "Unable to allocate memory.\n");
3532 status
= ql_request_irq(qdev
);
3536 static int qlge_close(struct net_device
*ndev
)
3538 struct ql_adapter
*qdev
= netdev_priv(ndev
);
3541 * Wait for device to recover from a reset.
3542 * (Rarely happens, but possible.)
3544 while (!test_bit(QL_ADAPTER_UP
, &qdev
->flags
))
3546 ql_adapter_down(qdev
);
3547 ql_release_adapter_resources(qdev
);
3551 static int ql_configure_rings(struct ql_adapter
*qdev
)
3554 struct rx_ring
*rx_ring
;
3555 struct tx_ring
*tx_ring
;
3556 int cpu_cnt
= min(MAX_CPUS
, (int)num_online_cpus());
3557 unsigned int lbq_buf_len
= (qdev
->ndev
->mtu
> 1500) ?
3558 LARGE_BUFFER_MAX_SIZE
: LARGE_BUFFER_MIN_SIZE
;
3560 qdev
->lbq_buf_order
= get_order(lbq_buf_len
);
3562 /* In a perfect world we have one RSS ring for each CPU
3563 * and each has it's own vector. To do that we ask for
3564 * cpu_cnt vectors. ql_enable_msix() will adjust the
3565 * vector count to what we actually get. We then
3566 * allocate an RSS ring for each.
3567 * Essentially, we are doing min(cpu_count, msix_vector_count).
3569 qdev
->intr_count
= cpu_cnt
;
3570 ql_enable_msix(qdev
);
3571 /* Adjust the RSS ring count to the actual vector count. */
3572 qdev
->rss_ring_count
= qdev
->intr_count
;
3573 qdev
->tx_ring_count
= cpu_cnt
;
3574 qdev
->rx_ring_count
= qdev
->tx_ring_count
+ qdev
->rss_ring_count
;
3576 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
3577 tx_ring
= &qdev
->tx_ring
[i
];
3578 memset((void *)tx_ring
, 0, sizeof(*tx_ring
));
3579 tx_ring
->qdev
= qdev
;
3581 tx_ring
->wq_len
= qdev
->tx_ring_size
;
3583 tx_ring
->wq_len
* sizeof(struct ob_mac_iocb_req
);
3586 * The completion queue ID for the tx rings start
3587 * immediately after the rss rings.
3589 tx_ring
->cq_id
= qdev
->rss_ring_count
+ i
;
3592 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
3593 rx_ring
= &qdev
->rx_ring
[i
];
3594 memset((void *)rx_ring
, 0, sizeof(*rx_ring
));
3595 rx_ring
->qdev
= qdev
;
3597 rx_ring
->cpu
= i
% cpu_cnt
; /* CPU to run handler on. */
3598 if (i
< qdev
->rss_ring_count
) {
3600 * Inbound (RSS) queues.
3602 rx_ring
->cq_len
= qdev
->rx_ring_size
;
3604 rx_ring
->cq_len
* sizeof(struct ql_net_rsp_iocb
);
3605 rx_ring
->lbq_len
= NUM_LARGE_BUFFERS
;
3607 rx_ring
->lbq_len
* sizeof(__le64
);
3608 rx_ring
->lbq_buf_size
= (u16
)lbq_buf_len
;
3609 QPRINTK(qdev
, IFUP
, DEBUG
,
3610 "lbq_buf_size %d, order = %d\n",
3611 rx_ring
->lbq_buf_size
, qdev
->lbq_buf_order
);
3612 rx_ring
->sbq_len
= NUM_SMALL_BUFFERS
;
3614 rx_ring
->sbq_len
* sizeof(__le64
);
3615 rx_ring
->sbq_buf_size
= SMALL_BUF_MAP_SIZE
;
3616 rx_ring
->type
= RX_Q
;
3619 * Outbound queue handles outbound completions only.
3621 /* outbound cq is same size as tx_ring it services. */
3622 rx_ring
->cq_len
= qdev
->tx_ring_size
;
3624 rx_ring
->cq_len
* sizeof(struct ql_net_rsp_iocb
);
3625 rx_ring
->lbq_len
= 0;
3626 rx_ring
->lbq_size
= 0;
3627 rx_ring
->lbq_buf_size
= 0;
3628 rx_ring
->sbq_len
= 0;
3629 rx_ring
->sbq_size
= 0;
3630 rx_ring
->sbq_buf_size
= 0;
3631 rx_ring
->type
= TX_Q
;
/* net_device open hook: size the rings, grab resources, and bring
 * the adapter up; unwind on any failure.
 */
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}
3661 static int ql_change_rx_buffers(struct ql_adapter
*qdev
)
3663 struct rx_ring
*rx_ring
;
3667 /* Wait for an oustanding reset to complete. */
3668 if (!test_bit(QL_ADAPTER_UP
, &qdev
->flags
)) {
3670 while (i
-- && !test_bit(QL_ADAPTER_UP
, &qdev
->flags
)) {
3671 QPRINTK(qdev
, IFUP
, ERR
,
3672 "Waiting for adapter UP...\n");
3677 QPRINTK(qdev
, IFUP
, ERR
,
3678 "Timed out waiting for adapter UP\n");
3683 status
= ql_adapter_down(qdev
);
3687 /* Get the new rx buffer size. */
3688 lbq_buf_len
= (qdev
->ndev
->mtu
> 1500) ?
3689 LARGE_BUFFER_MAX_SIZE
: LARGE_BUFFER_MIN_SIZE
;
3690 qdev
->lbq_buf_order
= get_order(lbq_buf_len
);
3692 for (i
= 0; i
< qdev
->rss_ring_count
; i
++) {
3693 rx_ring
= &qdev
->rx_ring
[i
];
3694 /* Set the new size. */
3695 rx_ring
->lbq_buf_size
= lbq_buf_len
;
3698 status
= ql_adapter_up(qdev
);
3704 QPRINTK(qdev
, IFUP
, ALERT
,
3705 "Driver up/down cycle failed, closing device.\n");
3706 set_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3707 dev_close(qdev
->ndev
);
3711 static int qlge_change_mtu(struct net_device
*ndev
, int new_mtu
)
3713 struct ql_adapter
*qdev
= netdev_priv(ndev
);
3716 if (ndev
->mtu
== 1500 && new_mtu
== 9000) {
3717 QPRINTK(qdev
, IFUP
, ERR
, "Changing to jumbo MTU.\n");
3718 } else if (ndev
->mtu
== 9000 && new_mtu
== 1500) {
3719 QPRINTK(qdev
, IFUP
, ERR
, "Changing to normal MTU.\n");
3720 } else if ((ndev
->mtu
== 1500 && new_mtu
== 1500) ||
3721 (ndev
->mtu
== 9000 && new_mtu
== 9000)) {
3726 queue_delayed_work(qdev
->workqueue
,
3727 &qdev
->mpi_port_cfg_work
, 3*HZ
);
3729 if (!netif_running(qdev
->ndev
)) {
3730 ndev
->mtu
= new_mtu
;
3734 ndev
->mtu
= new_mtu
;
3735 status
= ql_change_rx_buffers(qdev
);
3737 QPRINTK(qdev
, IFUP
, ERR
,
3738 "Changing MTU failed.\n");
3744 static struct net_device_stats
*qlge_get_stats(struct net_device
3747 return &ndev
->stats
;
3750 static void qlge_set_multicast_list(struct net_device
*ndev
)
3752 struct ql_adapter
*qdev
= (struct ql_adapter
*)netdev_priv(ndev
);
3753 struct dev_mc_list
*mc_ptr
;
3756 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
3760 * Set or clear promiscuous mode if a
3761 * transition is taking place.
3763 if (ndev
->flags
& IFF_PROMISC
) {
3764 if (!test_bit(QL_PROMISCUOUS
, &qdev
->flags
)) {
3765 if (ql_set_routing_reg
3766 (qdev
, RT_IDX_PROMISCUOUS_SLOT
, RT_IDX_VALID
, 1)) {
3767 QPRINTK(qdev
, HW
, ERR
,
3768 "Failed to set promiscous mode.\n");
3770 set_bit(QL_PROMISCUOUS
, &qdev
->flags
);
3774 if (test_bit(QL_PROMISCUOUS
, &qdev
->flags
)) {
3775 if (ql_set_routing_reg
3776 (qdev
, RT_IDX_PROMISCUOUS_SLOT
, RT_IDX_VALID
, 0)) {
3777 QPRINTK(qdev
, HW
, ERR
,
3778 "Failed to clear promiscous mode.\n");
3780 clear_bit(QL_PROMISCUOUS
, &qdev
->flags
);
3786 * Set or clear all multicast mode if a
3787 * transition is taking place.
3789 if ((ndev
->flags
& IFF_ALLMULTI
) ||
3790 (ndev
->mc_count
> MAX_MULTICAST_ENTRIES
)) {
3791 if (!test_bit(QL_ALLMULTI
, &qdev
->flags
)) {
3792 if (ql_set_routing_reg
3793 (qdev
, RT_IDX_ALLMULTI_SLOT
, RT_IDX_MCAST
, 1)) {
3794 QPRINTK(qdev
, HW
, ERR
,
3795 "Failed to set all-multi mode.\n");
3797 set_bit(QL_ALLMULTI
, &qdev
->flags
);
3801 if (test_bit(QL_ALLMULTI
, &qdev
->flags
)) {
3802 if (ql_set_routing_reg
3803 (qdev
, RT_IDX_ALLMULTI_SLOT
, RT_IDX_MCAST
, 0)) {
3804 QPRINTK(qdev
, HW
, ERR
,
3805 "Failed to clear all-multi mode.\n");
3807 clear_bit(QL_ALLMULTI
, &qdev
->flags
);
3812 if (ndev
->mc_count
) {
3813 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
3816 for (i
= 0, mc_ptr
= ndev
->mc_list
; mc_ptr
;
3817 i
++, mc_ptr
= mc_ptr
->next
)
3818 if (ql_set_mac_addr_reg(qdev
, (u8
*) mc_ptr
->dmi_addr
,
3819 MAC_ADDR_TYPE_MULTI_MAC
, i
)) {
3820 QPRINTK(qdev
, HW
, ERR
,
3821 "Failed to loadmulticast address.\n");
3822 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
3825 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
3826 if (ql_set_routing_reg
3827 (qdev
, RT_IDX_MCAST_MATCH_SLOT
, RT_IDX_MCAST_MATCH
, 1)) {
3828 QPRINTK(qdev
, HW
, ERR
,
3829 "Failed to set multicast match mode.\n");
3831 set_bit(QL_ALLMULTI
, &qdev
->flags
);
3835 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
3838 static int qlge_set_mac_address(struct net_device
*ndev
, void *p
)
3840 struct ql_adapter
*qdev
= (struct ql_adapter
*)netdev_priv(ndev
);
3841 struct sockaddr
*addr
= p
;
3844 if (netif_running(ndev
))
3847 if (!is_valid_ether_addr(addr
->sa_data
))
3848 return -EADDRNOTAVAIL
;
3849 memcpy(ndev
->dev_addr
, addr
->sa_data
, ndev
->addr_len
);
3851 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
3854 status
= ql_set_mac_addr_reg(qdev
, (u8
*) ndev
->dev_addr
,
3855 MAC_ADDR_TYPE_CAM_MAC
, qdev
->func
* MAX_CQ
);
3857 QPRINTK(qdev
, HW
, ERR
, "Failed to load MAC address.\n");
3858 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
/* net_device_ops .ndo_tx_timeout: a stalled TX queue is treated as an
 * ASIC fault — queue the asic error/reset machinery.
 */
static void qlge_tx_timeout(struct net_device *ndev)
{
    struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);

    ql_queue_asic_error(qdev);
}
3868 static void ql_asic_reset_work(struct work_struct
*work
)
3870 struct ql_adapter
*qdev
=
3871 container_of(work
, struct ql_adapter
, asic_reset_work
.work
);
3874 status
= ql_adapter_down(qdev
);
3878 status
= ql_adapter_up(qdev
);
3882 /* Restore rx mode. */
3883 clear_bit(QL_ALLMULTI
, &qdev
->flags
);
3884 clear_bit(QL_PROMISCUOUS
, &qdev
->flags
);
3885 qlge_set_multicast_list(qdev
->ndev
);
3890 QPRINTK(qdev
, IFUP
, ALERT
,
3891 "Driver up/down cycle failed, closing device\n");
3893 set_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3894 dev_close(qdev
->ndev
);
3898 static struct nic_operations qla8012_nic_ops
= {
3899 .get_flash
= ql_get_8012_flash_params
,
3900 .port_initialize
= ql_8012_port_initialize
,
3903 static struct nic_operations qla8000_nic_ops
= {
3904 .get_flash
= ql_get_8000_flash_params
,
3905 .port_initialize
= ql_8000_port_initialize
,
3908 /* Find the pcie function number for the other NIC
3909 * on this chip. Since both NIC functions share a
3910 * common firmware we have the lowest enabled function
3911 * do any common work. Examples would be resetting
3912 * after a fatal firmware error, or doing a firmware
3915 static int ql_get_alt_pcie_func(struct ql_adapter
*qdev
)
3919 u32 nic_func1
, nic_func2
;
3921 status
= ql_read_mpi_reg(qdev
, MPI_TEST_FUNC_PORT_CFG
,
3926 nic_func1
= ((temp
>> MPI_TEST_NIC1_FUNC_SHIFT
) &
3927 MPI_TEST_NIC_FUNC_MASK
);
3928 nic_func2
= ((temp
>> MPI_TEST_NIC2_FUNC_SHIFT
) &
3929 MPI_TEST_NIC_FUNC_MASK
);
3931 if (qdev
->func
== nic_func1
)
3932 qdev
->alt_func
= nic_func2
;
3933 else if (qdev
->func
== nic_func2
)
3934 qdev
->alt_func
= nic_func1
;
3941 static int ql_get_board_info(struct ql_adapter
*qdev
)
3945 (ql_read32(qdev
, STS
) & STS_FUNC_ID_MASK
) >> STS_FUNC_ID_SHIFT
;
3949 status
= ql_get_alt_pcie_func(qdev
);
3953 qdev
->port
= (qdev
->func
< qdev
->alt_func
) ? 0 : 1;
3955 qdev
->xg_sem_mask
= SEM_XGMAC1_MASK
;
3956 qdev
->port_link_up
= STS_PL1
;
3957 qdev
->port_init
= STS_PI1
;
3958 qdev
->mailbox_in
= PROC_ADDR_MPI_RISC
| PROC_ADDR_FUNC2_MBI
;
3959 qdev
->mailbox_out
= PROC_ADDR_MPI_RISC
| PROC_ADDR_FUNC2_MBO
;
3961 qdev
->xg_sem_mask
= SEM_XGMAC0_MASK
;
3962 qdev
->port_link_up
= STS_PL0
;
3963 qdev
->port_init
= STS_PI0
;
3964 qdev
->mailbox_in
= PROC_ADDR_MPI_RISC
| PROC_ADDR_FUNC0_MBI
;
3965 qdev
->mailbox_out
= PROC_ADDR_MPI_RISC
| PROC_ADDR_FUNC0_MBO
;
3967 qdev
->chip_rev_id
= ql_read32(qdev
, REV_ID
);
3968 qdev
->device_id
= qdev
->pdev
->device
;
3969 if (qdev
->device_id
== QLGE_DEVICE_ID_8012
)
3970 qdev
->nic_ops
= &qla8012_nic_ops
;
3971 else if (qdev
->device_id
== QLGE_DEVICE_ID_8000
)
3972 qdev
->nic_ops
= &qla8000_nic_ops
;
3976 static void ql_release_all(struct pci_dev
*pdev
)
3978 struct net_device
*ndev
= pci_get_drvdata(pdev
);
3979 struct ql_adapter
*qdev
= netdev_priv(ndev
);
3981 if (qdev
->workqueue
) {
3982 destroy_workqueue(qdev
->workqueue
);
3983 qdev
->workqueue
= NULL
;
3987 iounmap(qdev
->reg_base
);
3988 if (qdev
->doorbell_area
)
3989 iounmap(qdev
->doorbell_area
);
3990 pci_release_regions(pdev
);
3991 pci_set_drvdata(pdev
, NULL
);
3994 static int __devinit
ql_init_device(struct pci_dev
*pdev
,
3995 struct net_device
*ndev
, int cards_found
)
3997 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4000 memset((void *)qdev
, 0, sizeof(*qdev
));
4001 err
= pci_enable_device(pdev
);
4003 dev_err(&pdev
->dev
, "PCI device enable failed.\n");
4009 pci_set_drvdata(pdev
, ndev
);
4011 /* Set PCIe read request size */
4012 err
= pcie_set_readrq(pdev
, 4096);
4014 dev_err(&pdev
->dev
, "Set readrq failed.\n");
4018 err
= pci_request_regions(pdev
, DRV_NAME
);
4020 dev_err(&pdev
->dev
, "PCI region request failed.\n");
4024 pci_set_master(pdev
);
4025 if (!pci_set_dma_mask(pdev
, DMA_BIT_MASK(64))) {
4026 set_bit(QL_DMA64
, &qdev
->flags
);
4027 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
4029 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
4031 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(32));
4035 dev_err(&pdev
->dev
, "No usable DMA configuration.\n");
4040 ioremap_nocache(pci_resource_start(pdev
, 1),
4041 pci_resource_len(pdev
, 1));
4042 if (!qdev
->reg_base
) {
4043 dev_err(&pdev
->dev
, "Register mapping failed.\n");
4048 qdev
->doorbell_area_size
= pci_resource_len(pdev
, 3);
4049 qdev
->doorbell_area
=
4050 ioremap_nocache(pci_resource_start(pdev
, 3),
4051 pci_resource_len(pdev
, 3));
4052 if (!qdev
->doorbell_area
) {
4053 dev_err(&pdev
->dev
, "Doorbell register mapping failed.\n");
4058 err
= ql_get_board_info(qdev
);
4060 dev_err(&pdev
->dev
, "Register access failed.\n");
4064 qdev
->msg_enable
= netif_msg_init(debug
, default_msg
);
4065 spin_lock_init(&qdev
->hw_lock
);
4066 spin_lock_init(&qdev
->stats_lock
);
4068 /* make sure the EEPROM is good */
4069 err
= qdev
->nic_ops
->get_flash(qdev
);
4071 dev_err(&pdev
->dev
, "Invalid FLASH.\n");
4075 memcpy(ndev
->perm_addr
, ndev
->dev_addr
, ndev
->addr_len
);
4077 /* Set up the default ring sizes. */
4078 qdev
->tx_ring_size
= NUM_TX_RING_ENTRIES
;
4079 qdev
->rx_ring_size
= NUM_RX_RING_ENTRIES
;
4081 /* Set up the coalescing parameters. */
4082 qdev
->rx_coalesce_usecs
= DFLT_COALESCE_WAIT
;
4083 qdev
->tx_coalesce_usecs
= DFLT_COALESCE_WAIT
;
4084 qdev
->rx_max_coalesced_frames
= DFLT_INTER_FRAME_WAIT
;
4085 qdev
->tx_max_coalesced_frames
= DFLT_INTER_FRAME_WAIT
;
4088 * Set up the operating parameters.
4091 qdev
->workqueue
= create_singlethread_workqueue(ndev
->name
);
4092 INIT_DELAYED_WORK(&qdev
->asic_reset_work
, ql_asic_reset_work
);
4093 INIT_DELAYED_WORK(&qdev
->mpi_reset_work
, ql_mpi_reset_work
);
4094 INIT_DELAYED_WORK(&qdev
->mpi_work
, ql_mpi_work
);
4095 INIT_DELAYED_WORK(&qdev
->mpi_port_cfg_work
, ql_mpi_port_cfg_work
);
4096 INIT_DELAYED_WORK(&qdev
->mpi_idc_work
, ql_mpi_idc_work
);
4097 init_completion(&qdev
->ide_completion
);
4100 dev_info(&pdev
->dev
, "%s\n", DRV_STRING
);
4101 dev_info(&pdev
->dev
, "Driver name: %s, Version: %s.\n",
4102 DRV_NAME
, DRV_VERSION
);
4106 ql_release_all(pdev
);
4107 pci_disable_device(pdev
);
4112 static const struct net_device_ops qlge_netdev_ops
= {
4113 .ndo_open
= qlge_open
,
4114 .ndo_stop
= qlge_close
,
4115 .ndo_start_xmit
= qlge_send
,
4116 .ndo_change_mtu
= qlge_change_mtu
,
4117 .ndo_get_stats
= qlge_get_stats
,
4118 .ndo_set_multicast_list
= qlge_set_multicast_list
,
4119 .ndo_set_mac_address
= qlge_set_mac_address
,
4120 .ndo_validate_addr
= eth_validate_addr
,
4121 .ndo_tx_timeout
= qlge_tx_timeout
,
4122 .ndo_vlan_rx_register
= ql_vlan_rx_register
,
4123 .ndo_vlan_rx_add_vid
= ql_vlan_rx_add_vid
,
4124 .ndo_vlan_rx_kill_vid
= ql_vlan_rx_kill_vid
,
4127 static int __devinit
qlge_probe(struct pci_dev
*pdev
,
4128 const struct pci_device_id
*pci_entry
)
4130 struct net_device
*ndev
= NULL
;
4131 struct ql_adapter
*qdev
= NULL
;
4132 static int cards_found
= 0;
4135 ndev
= alloc_etherdev_mq(sizeof(struct ql_adapter
),
4136 min(MAX_CPUS
, (int)num_online_cpus()));
4140 err
= ql_init_device(pdev
, ndev
, cards_found
);
4146 qdev
= netdev_priv(ndev
);
4147 SET_NETDEV_DEV(ndev
, &pdev
->dev
);
4154 | NETIF_F_HW_VLAN_TX
4155 | NETIF_F_HW_VLAN_RX
| NETIF_F_HW_VLAN_FILTER
);
4156 ndev
->features
|= NETIF_F_GRO
;
4158 if (test_bit(QL_DMA64
, &qdev
->flags
))
4159 ndev
->features
|= NETIF_F_HIGHDMA
;
4162 * Set up net_device structure.
4164 ndev
->tx_queue_len
= qdev
->tx_ring_size
;
4165 ndev
->irq
= pdev
->irq
;
4167 ndev
->netdev_ops
= &qlge_netdev_ops
;
4168 SET_ETHTOOL_OPS(ndev
, &qlge_ethtool_ops
);
4169 ndev
->watchdog_timeo
= 10 * HZ
;
4171 err
= register_netdev(ndev
);
4173 dev_err(&pdev
->dev
, "net device registration failed.\n");
4174 ql_release_all(pdev
);
4175 pci_disable_device(pdev
);
4179 ql_display_dev_info(ndev
);
4184 static void __devexit
qlge_remove(struct pci_dev
*pdev
)
4186 struct net_device
*ndev
= pci_get_drvdata(pdev
);
4187 unregister_netdev(ndev
);
4188 ql_release_all(pdev
);
4189 pci_disable_device(pdev
);
4194 * This callback is called by the PCI subsystem whenever
4195 * a PCI bus error is detected.
4197 static pci_ers_result_t
qlge_io_error_detected(struct pci_dev
*pdev
,
4198 enum pci_channel_state state
)
4200 struct net_device
*ndev
= pci_get_drvdata(pdev
);
4201 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4203 netif_device_detach(ndev
);
4205 if (state
== pci_channel_io_perm_failure
)
4206 return PCI_ERS_RESULT_DISCONNECT
;
4208 if (netif_running(ndev
))
4209 ql_adapter_down(qdev
);
4211 pci_disable_device(pdev
);
4213 /* Request a slot reset. */
4214 return PCI_ERS_RESULT_NEED_RESET
;
4218 * This callback is called after the PCI buss has been reset.
4219 * Basically, this tries to restart the card from scratch.
4220 * This is a shortened version of the device probe/discovery code,
4221 * it resembles the first-half of the () routine.
4223 static pci_ers_result_t
qlge_io_slot_reset(struct pci_dev
*pdev
)
4225 struct net_device
*ndev
= pci_get_drvdata(pdev
);
4226 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4228 if (pci_enable_device(pdev
)) {
4229 QPRINTK(qdev
, IFUP
, ERR
,
4230 "Cannot re-enable PCI device after reset.\n");
4231 return PCI_ERS_RESULT_DISCONNECT
;
4234 pci_set_master(pdev
);
4236 netif_carrier_off(ndev
);
4237 ql_adapter_reset(qdev
);
4239 /* Make sure the EEPROM is good */
4240 memcpy(ndev
->perm_addr
, ndev
->dev_addr
, ndev
->addr_len
);
4242 if (!is_valid_ether_addr(ndev
->perm_addr
)) {
4243 QPRINTK(qdev
, IFUP
, ERR
, "After reset, invalid MAC address.\n");
4244 return PCI_ERS_RESULT_DISCONNECT
;
4247 return PCI_ERS_RESULT_RECOVERED
;
4250 static void qlge_io_resume(struct pci_dev
*pdev
)
4252 struct net_device
*ndev
= pci_get_drvdata(pdev
);
4253 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4255 pci_set_master(pdev
);
4257 if (netif_running(ndev
)) {
4258 if (ql_adapter_up(qdev
)) {
4259 QPRINTK(qdev
, IFUP
, ERR
,
4260 "Device initialization failed after reset.\n");
4265 netif_device_attach(ndev
);
4268 static struct pci_error_handlers qlge_err_handler
= {
4269 .error_detected
= qlge_io_error_detected
,
4270 .slot_reset
= qlge_io_slot_reset
,
4271 .resume
= qlge_io_resume
,
4274 static int qlge_suspend(struct pci_dev
*pdev
, pm_message_t state
)
4276 struct net_device
*ndev
= pci_get_drvdata(pdev
);
4277 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4280 netif_device_detach(ndev
);
4282 if (netif_running(ndev
)) {
4283 err
= ql_adapter_down(qdev
);
4288 err
= pci_save_state(pdev
);
4292 pci_disable_device(pdev
);
4294 pci_set_power_state(pdev
, pci_choose_state(pdev
, state
));
4300 static int qlge_resume(struct pci_dev
*pdev
)
4302 struct net_device
*ndev
= pci_get_drvdata(pdev
);
4303 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4306 pci_set_power_state(pdev
, PCI_D0
);
4307 pci_restore_state(pdev
);
4308 err
= pci_enable_device(pdev
);
4310 QPRINTK(qdev
, IFUP
, ERR
, "Cannot enable PCI device from suspend\n");
4313 pci_set_master(pdev
);
4315 pci_enable_wake(pdev
, PCI_D3hot
, 0);
4316 pci_enable_wake(pdev
, PCI_D3cold
, 0);
4318 if (netif_running(ndev
)) {
4319 err
= ql_adapter_up(qdev
);
4324 netif_device_attach(ndev
);
4328 #endif /* CONFIG_PM */
4330 static void qlge_shutdown(struct pci_dev
*pdev
)
4332 qlge_suspend(pdev
, PMSG_SUSPEND
);
4335 static struct pci_driver qlge_driver
= {
4337 .id_table
= qlge_pci_tbl
,
4338 .probe
= qlge_probe
,
4339 .remove
= __devexit_p(qlge_remove
),
4341 .suspend
= qlge_suspend
,
4342 .resume
= qlge_resume
,
4344 .shutdown
= qlge_shutdown
,
4345 .err_handler
= &qlge_err_handler
4348 static int __init
qlge_init_module(void)
4350 return pci_register_driver(&qlge_driver
);
4353 static void __exit
qlge_exit(void)
4355 pci_unregister_driver(&qlge_driver
);
/* Wire the module load/unload hooks to the init/exit routines above. */
module_init(qlge_init_module);
module_exit(qlge_exit);