/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.02.00-k36"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

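/*
 * Illustrative note (not in the original source): "debug" is a bitmask of
 * NETIF_MSG_* flags rather than a plain verbosity level; -1 means "use
 * default_msg above".  The probe path would typically seed the per-adapter
 * msg_enable from it, e.g.:
 */
#if 0
	/* DRV|PROBE|LINK|IFDOWN|IFUP == 0x37: modprobe qla3xxx debug=0x37 */
	qdev->msg_enable = netif_msg_init(debug, default_msg);
#endif
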
static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

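/*
 * Illustrative sketch (not part of the original file): the semaphore
 * register packs a write-enable mask in its upper 16 bits and the lock
 * bits in the lower 16, so reading back and comparing against sem_bits
 * confirms ownership.  A typical acquire/release pairing:
 */
#if 0
	if (!ql_sem_lock(qdev, QL_NVRAM_SEM_MASK, sem_bits))
		return -EAGAIN;	/* another function owns the resource */
	/* ... touch the shared resource ... */
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
#endif
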
/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (1) {
		if (!ql_sem_lock(qdev,
				 QL_DRVR_SEM_MASK,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				  * 2) << 1)) {
			if (i < 10) {
				ssleep(1);
				i++;
			} else {
				printk(KERN_ERR PFX "%s: Timed out waiting for "
				       "driver lock...\n",
				       qdev->ndev->name);
				return 0;
			}
		} else {
			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",
			       qdev->ndev->name);
			return 1;
		}
	}
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
			      u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

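/*
 * Illustrative note (not in the original source): most registers sit
 * behind the page-select field in ispControlStatus, so each paged
 * accessor above checks qdev->current_page first to skip a redundant
 * page switch.  A hand-rolled page-0 read reduces to:
 */
#if 0
	if (qdev->current_page != 0)	/* caller holds hw_lock */
		ql_set_register_page(qdev, 0);
	value = readl(reg);
#endif
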
static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	u64 map;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
			       qdev->ndev->name);
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit =
		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev,
					      &port_regs->CommonRegs.
					      serialPortInterfaceReg) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16) data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

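/*
 * Illustrative sketch (not part of the original file): dumping the first
 * few NVRAM words with the bit-bang helpers above.  The caller must hold
 * hw_lock and the NVRAM semaphore, exactly as ql_get_nvram_params() does.
 */
#if 0
	u16 word;
	u32 addr;

	for (addr = 0; addr < 4; addr++) {
		eeprom_readword(qdev, addr, &word);
		printk(KERN_DEBUG PFX "nvram[%u] = 0x%04x\n", addr, word);
	}
#endif
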
static void ql_swap_mac_addr(u8 * macAddress)
{
#ifdef __BIG_ENDIAN
	u8 temp;
	temp = macAddress[0];
	macAddress[0] = macAddress[1];
	macAddress[1] = temp;
	temp = macAddress[2];
	macAddress[2] = macAddress[3];
	macAddress[3] = temp;
	temp = macAddress[4];
	macAddress[4] = macAddress[5];
	macAddress[5] = temp;
#endif
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *) &qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
		       __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	/*
	 * We have a problem with endianness for the MAC addresses
	 * and the two 8-bit values, version and numPorts.  We
	 * have to swap them on big endian systems.
	 */
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
	pEEPROMData = (u16 *) &qdev->nvram_data.version;
	*pEEPROMData = le16_to_cpu(*pEEPROMData);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices.
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first.
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 scanWasEnabled;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		scanWasEnabled = 1;
	} else {
		/* Scan is disabled */
		scanWasEnabled = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return scanWasEnabled;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 * value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 temp;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

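/*
 * Illustrative sketch (not part of the original file): the MII helpers
 * above serialize management-port access by pausing scan mode, polling
 * the BSY bit, then restarting the scan.  Reading the standard PHY ID
 * registers (MII offsets 2 and 3) would look like:
 */
#if 0
	u16 id_hi, id_lo;

	if (ql_mii_read_reg(qdev, 2, &id_hi) == 0 &&	/* MII_PHYSID1 */
	    ql_mii_read_reg(qdev, 3, &id_lo) == 0)	/* MII_PHYSID2 */
		printk(KERN_DEBUG PFX "PHY id %04x:%04x\n", id_hi, id_lo);
#endif
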
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    mac_index);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    mac_index);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_petbi_reset_ex(qdev, mac_index);
	ql_petbi_start_neg_ex(qdev, mac_index);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	reg = (((reg & 0x18) >> 3) & 3);

	if (reg == 2)
		return SPEED_1000;
	else if (reg == 1)
		return SPEED_100;
	else if (reg == 0)
		return SPEED_10;
	else
		return -1;
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	return (reg & PHY_AUX_DUPLEX_STAT) != 0;
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

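/*
 * Illustrative note (not in the original source): every MAC config write
 * above follows the chip's mask-and-data convention, where the upper 16
 * bits select which of the lower 16 bits actually change.  Taking the
 * port-enable bit as 0x0001 purely for illustration:
 *
 *	enable:  value = 0x00010001	(write-enable PE, set PE)
 *	disable: value = 0x00010000	(write-enable PE, clear PE)
 *
 * so a single write updates one field without touching its neighbors.
 */
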
/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
					 u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
			    PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
	ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
			    mac_index);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_phy_reset_ex(qdev, mac_index);
	ql_phy_start_neg_ex(qdev, mac_index);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Link is down.\n", qdev->ndev->name);
	}
	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev, qdev->mac_index);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->name);
			ql_mac_enable(qdev, 1);
		}

		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: Change port_link_state LS_DOWN to LS_UP.\n",
			       qdev->ndev->name);
		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev)
			       ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev)) {	/* Restart port */
				return -1;
			} else
				return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static void ql_link_state_machine(struct ql3_adapter *qdev)
{
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Reset in progress, skip processing link "
			       "state.\n", qdev->ndev->name);

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			ql_port_start(qdev);
		}
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: port_link_state = LS_DOWN.\n",
			       qdev->ndev->name);
		if (curr_link_state == LS_UP) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: curr_link_state = LS_UP.\n",
				       qdev->ndev->name);
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is down.\n",
				       qdev->ndev->name);
			qdev->port_link_state = LS_DOWN;
		}
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
		set_bit(QL_LINK_MASTER,&qdev->flags);
	else
		clear_bit(QL_LINK_MASTER,&qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
			ql_petbi_init_ex(qdev, qdev->mac_index);
	} else {
		if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
			ql_phy_init_ex(qdev, qdev->mac_index);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset so that the
 * management interface clock speed can be set properly.  It would be better if
 * we had a way to disable MDC until after the PHY is out of reset, but we
 * don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
		    | SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->n_stats = 0;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_perm_addr = ethtool_op_get_perm_addr,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
};

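/*
 * Illustrative note (not in the original source): userspace ethtool maps
 * onto the ops table above roughly as follows:
 *
 *	ethtool eth0			-> ql_get_settings()
 *	ethtool -i eth0			-> ql_get_drvinfo()
 *	ethtool -s eth0 msglvl 0x37	-> ql_set_msglevel()
 */
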
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	u64 map;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				printk(KERN_DEBUG PFX
				       "%s: Failed dev_alloc_skb().\n",
				       qdev->ndev->name);
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);
				lrg_buf_cb->buf_phy_addr_low =
				    cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
				    cpu_to_le32(MS_64BITS(map));
				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				pci_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8)
	    && (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16)
		       && (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (NUM_LBUFQ_ENTRIES - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}

		qdev->lrg_buf_next_free = lrg_buf_q_ele;

		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    rxLargeQProducerIndex,
				    qdev->lrg_buf_q_producer_index);
	}
}

static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->stats.tx_packets++;
	qdev->stats.tx_bytes += tx_cb->skb->len;
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;
	atomic_inc(&qdev->tx_count);
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	long int offset;
	u32 lrg_buf_phy_addr_low = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	u32 *curr_ial_ptr;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;

	curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
	qdev->small_buf_release_cnt++;

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer (3022 only) */
		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
		qdev->lrg_buf_release_cnt++;
		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
			qdev->lrg_buf_index = 0;
		}
		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
		curr_ial_ptr++;
	}

	/* start of second buffer */
	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
	lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];

	/*
	 * Second buffer gets sent up the stack.
	 */
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
		qdev->lrg_buf_index = 0;
	skb = lrg_buf_cb2->skb;

	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->dev = qdev->ndev;
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	qdev->ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	long int offset;
	u32 lrg_buf_phy_addr_low = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	u32 *curr_ial_ptr;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
	qdev->small_buf_release_cnt++;

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
		qdev->lrg_buf_release_cnt++;
		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
			qdev->lrg_buf_index = 0;
		skb1 = lrg_buf_cb1->skb;
		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
		curr_ial_ptr++;

		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
	lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
	skb2 = lrg_buf_cb2->skb;
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
		qdev->lrg_buf_index = 0;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second.  This
		 * is necessary for 3022 IP completions.
		 */
		memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE |
		     IB_IP_IOCB_RSP_3032_NUC)) {
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       ndev->name,
			       ((checksum &
				 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
				"UDP"), checksum);
		} else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->dev = qdev->ndev;
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;
	ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	unsigned long hw_flags;

	/* While there are entries in the completion queue. */
	while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {

		net_rsp = qdev->rsp_current;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default:
			{
				u32 *tmp = (u32 *) net_rsp;
				printk(KERN_ERR PFX
				       "%s: Hit default case, not handled!\n"
				       "	dropping the packet, opcode = "
				       "%x.\n",
				       ndev->name, net_rsp->opcode);
				printk(KERN_ERR PFX
				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
				       (unsigned long int)tmp[0],
				       (unsigned long int)tmp[1],
				       (unsigned long int)tmp[2],
				       (unsigned long int)tmp[3]);
			}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	ql_update_lrg_bufq_prod_index(qdev);

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}

		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    rxSmallQProducerIndex,
				    qdev->small_buf_q_producer_index);
	}

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rspQConsumerIndex,
			    qdev->rsp_consumer_index);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	if (unlikely(netif_queue_stopped(qdev->ndev))) {
		if (netif_queue_stopped(qdev->ndev) &&
		    (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
			netif_wake_queue(qdev->ndev);
	}

	return *tx_cleaned + *rx_cleaned;
}

static int ql_poll(struct net_device *ndev, int *budget)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	int work_to_do = min(*budget, ndev->quota);
	int rx_cleaned = 0, tx_cleaned = 0;

	if (!netif_carrier_ok(ndev))
		goto quit_polling;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
	*budget -= rx_cleaned;
	ndev->quota -= rx_cleaned;

	if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
quit_polling:
		netif_rx_complete(ndev);
		ql_enable_interrupts(qdev);
		return 0;
	}
	return 1;
}

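/*
 * Illustrative note (not in the original source): ql_poll() follows the
 * old-style NAPI contract of this kernel generation: the ISR disables
 * completion interrupts and schedules the poll, the poll reaps completions
 * with interrupts off, and only when no work remains does it call
 * netif_rx_complete() and ql_enable_interrupts(), returning nonzero while
 * it still wants to be polled.
 */
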
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE,&qdev->flags) ;

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
			    ql_read_page0_reg_l(qdev,
						&port_regs->PortFatalErrStatus);
			printk(KERN_WARNING PFX
			       "%s: Resetting chip. PortFatalErrStatus "
			       "register = 0x%x\n", ndev->name, var);
			set_bit(QL_RESET_START,&qdev->flags) ;
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
			printk(KERN_ERR PFX
			       "%s: Another function issued a reset to the "
			       "chip. ISR value = %x.\n", ndev->name, value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(netif_rx_schedule_prep(ndev)))
			__netif_rx_schedule(ndev);
		else
			ql_enable_interrupts(qdev);
	} else {
		return IRQ_NONE;
	}

	return IRQ_RETVAL(handled);
}

/*
 * Get the total number of segments needed for the
 * given number of fragments.  This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given.  Each address list has 5 addr/len
 * pairs.  The 5th pair in each OAL is used to point to
 * the next OAL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
static int ql_get_seg_count(unsigned short frags)
{
	switch (frags) {
	case 0:	return 1;	/* just the skb->data seg */
	case 1:	return 2;	/* skb->data + 1 frag */
	case 2:	return 3;	/* skb->data + 2 frags */
	case 3:	return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
	case 4:	return 6;
	case 5:	return 7;
	case 6:	return 8;
	case 7:	return 10;
	case 8:	return 11;
	case 9:	return 12;
	case 10: return 13;
	case 11: return 15;
	case 12: return 16;
	case 13: return 17;
	case 14: return 18;
	case 15: return 20;
	case 16: return 21;
	case 17: return 22;
	case 18: return 23;
	}
	return -1;
}

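/*
 * Illustrative derivation (not in the original source) of the table
 * above.  The IOCB carries 3 addr/len pairs and each OAL carries 5, with
 * the last pair of either stolen for a continuation pointer when more
 * entries follow.  For 7 frags ("case 7: return 10"):
 *
 *	seg 0		skb->data	} IOCB (3 pairs)
 *	seg 1		frag 0		}
 *	seg 2		link to OAL #1	}
 *	segs 3-6	frags 1-4	} OAL #1 (5 pairs)
 *	seg 7		link to OAL #2	}
 *	segs 8-9	frags 5-6	} OAL #2
 *
 * which totals 10 segments.
 */
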
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	struct ethhdr *eth;
	struct iphdr *ip = NULL;
	u8 offset = ETH_HLEN;

	eth = (struct ethhdr *)(skb->data);

	if (eth->h_proto == __constant_htons(ETH_P_IP)) {
		ip = (struct iphdr *)&skb->data[ETH_HLEN];
	} else if (eth->h_proto == htons(ETH_P_8021Q) &&
		   ((struct vlan_ethhdr *)skb->data)->
		   h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
		ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
		offset = VLAN_ETH_HLEN;
	}

	if (ip) {
		if (ip->protocol == IPPROTO_TCP) {
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
			mac_iocb_ptr->ip_hdr_off = offset;
			mac_iocb_ptr->ip_hdr_len = ip->ihl;
		} else if (ip->protocol == IPPROTO_UDP) {
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
			mac_iocb_ptr->ip_hdr_off = offset;
			mac_iocb_ptr->ip_hdr_len = ip->ihl;
		}
	}
}

/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	u64 map;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}
	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	seg_cnt = tx_cb->seg_count =
	    ql_get_seg_count((skb_shinfo(skb)->nr_frags));
	if (seg_cnt == -1) {
		printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}
	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
	tx_cb->skb = skb;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);
	len = skb_headlen(skb);
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (!skb_shinfo(skb)->nr_frags) {
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	} else {
		int i;
		oal = tx_cb->oal;
		for (i = 0; i < frag_cnt; i++, seg++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			oal_entry++;
			if ((seg == 2 && seg_cnt > 3) ||   /* Check for continuation */
			    (seg == 7 && seg_cnt > 8) ||   /* requirements. It's strange */
			    (seg == 12 && seg_cnt > 13) || /* but necessary. */
			    (seg == 17 && seg_cnt > 18)) {
				/* Continuation entry points to outbound address list. */
				map = pci_map_single(qdev->pdev, oal,
						     sizeof(struct oal),
						     PCI_DMA_TODEVICE);
				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
				oal_entry->len =
				    cpu_to_le32(sizeof(struct oal) |
						OAL_CONT_ENTRY);
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  sizeof(struct oal));
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
			}

			map = pci_map_page(qdev->pdev, frag->page,
					   frag->page_offset, frag->size,
					   PCI_DMA_TODEVICE);
			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(frag->size);
			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			pci_unmap_len_set(&tx_cb->map[seg], maplen,
					  frag->size);
		}
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	}

	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;
	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	ndev->trans_start = jiffies;
	if (netif_msg_tx_queued(qdev))
		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
		       ndev->name, qdev->req_producer_index, skb->len);

	atomic_dec(&qdev->tx_count);
	return NETDEV_TX_OK;
}

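/*
 * Illustrative note (not in the original source): ql3xxx_send() never
 * takes hw_lock itself; its only hardware interaction is the final
 * doorbell write of req_producer_index via ql_write_common_reg_l(), which
 * locks internally.  The DMA mappings recorded here with
 * pci_unmap_addr_set()/pci_unmap_len_set() are torn down later in
 * ql_process_mac_tx_intr() when the completion for this transaction_id
 * arrives.
 */
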
static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->req_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->req_q_size,
				 &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
		printk(KERN_ERR PFX "%s: reqQ failed.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	qdev->rsp_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->rsp_q_size,
				 &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
		printk(KERN_ERR PFX
		       "%s: rspQ allocation failed\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);

	return 0;
}

static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
}
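
/*
 * The large- and small-buffer queues below are allocated at twice their
 * natural size (minimum one page), presumably so that a suitably aligned
 * region is always available within the allocation.
 */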
static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
	    NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->lrg_buf_q_alloc_size,
				 &qdev->lrg_buf_q_alloc_phy_addr);

	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: lBufQ failed\n", qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;

	/* Create Small Buffer Queue */
	qdev->small_buf_q_size =
	    NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->small_buf_q_size < PAGE_SIZE)
		qdev->small_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;

	qdev->small_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_q_alloc_size,
				 &qdev->small_buf_q_alloc_phy_addr);

	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Small Buffer Queue allocation failed.\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
				    qdev->lrg_buf_q_alloc_virt_addr,
				    qdev->lrg_buf_q_alloc_phy_addr);
		return -ENOMEM;
	}

	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
	return 0;
}
static void ql_free_buffer_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,
			    qdev->lrg_buf_q_alloc_phy_addr);

	qdev->lrg_buf_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->small_buf_q_alloc_size,
			    qdev->small_buf_q_alloc_virt_addr,
			    qdev->small_buf_q_alloc_phy_addr);

	qdev->small_buf_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
}
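
/*
 * Small receive buffers are carved out of a single coherent allocation:
 * every bufq address element is pointed at a QL_SMALL_BUFFER_SIZE slice
 * of the same block, so only one mapping has to be managed.
 */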
static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct bufq_addr_element *small_buf_q_entry;

	/* Currently we allocate one chunk of memory and use it for small buffers. */
	qdev->small_buf_total_size =
	    (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
	     QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_total_size,
				 &qdev->small_buf_phy_addr);

	if (qdev->small_buf_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Failed to get small buffer memory.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =
		    cpu_to_le32(qdev->small_buf_phy_addr_high);
		small_buf_q_entry->addr_low =
		    cpu_to_le32(qdev->small_buf_phy_addr_low +
				(i * QL_SMALL_BUFFER_SIZE));
		small_buf_q_entry++;
	}
	qdev->small_buf_index = 0;
	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
	return 0;
}
static void ql_free_small_buffers(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}
	if (qdev->small_buf_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    qdev->small_buf_total_size,
				    qdev->small_buf_virt_addr,
				    qdev->small_buf_phy_addr);

		qdev->small_buf_virt_addr = NULL;
	}
}
static void ql_free_large_buffers(struct ql3_adapter *qdev)
{
	int i = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(lrg_buf_cb, mapaddr),
					 pci_unmap_len(lrg_buf_cb, maplen),
					 PCI_DMA_FROMDEVICE);
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		} else {
			break;
		}
	}
}
static void ql_init_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
		buf_addr_ele++;
	}
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_skb_check = 0;
}
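
/*
 * Each large receive buffer is a freshly allocated skb.  QL_HEADER_SPACE
 * bytes are reserved at the front (the receive path copies the ethernet
 * header there), and the bus address is stored little-endian so it can
 * be handed straight to the buffer queue.
 */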
static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	u64 map;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
		skb = dev_alloc_skb(qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			printk(KERN_ERR PFX
			       "%s: large buff alloc failed, "
			       "for %d bytes at index %d.\n",
			       qdev->ndev->name,
			       qdev->lrg_buffer_len * 2, i);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		}

		lrg_buf_cb = &qdev->lrg_buf[i];
		memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		lrg_buf_cb->index = i;
		lrg_buf_cb->skb = skb;
		/*
		 * We save some space to copy the ethhdr from first
		 * buffer
		 */
		skb_reserve(skb, QL_HEADER_SPACE);
		map = pci_map_single(qdev->pdev,
				     skb->data,
				     qdev->lrg_buffer_len -
				     QL_HEADER_SPACE,
				     PCI_DMA_FROMDEVICE);
		pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
		pci_unmap_len_set(lrg_buf_cb, maplen,
				  qdev->lrg_buffer_len -
				  QL_HEADER_SPACE);
		lrg_buf_cb->buf_phy_addr_low =
		    cpu_to_le32(LS_64BITS(map));
		lrg_buf_cb->buf_phy_addr_high =
		    cpu_to_le32(MS_64BITS(map));
	}
	return 0;
}
static void ql_free_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		if (tx_cb->oal) {
			kfree(tx_cb->oal);
			tx_cb->oal = NULL;
		}
		tx_cb++;
	}
}
static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr =
	    qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (tx_cb->oal == NULL)
			return -1;
	}
	return 0;
}
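
/*
 * Master allocation routine.  Resources are acquired in dependency order
 * (shadow registers, req/rsp rings, buffer queues, small and large
 * buffers, transmit free list) and unwound through the error labels in
 * reverse order on failure.
 */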
static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		printk(KERN_ERR PFX
		       "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
	    (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;

	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of Network Request Queue Consumer Address Register and
	 * Network Completion Queue Producer Index Register
	 */
	qdev->shadow_reg_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
		    MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
		    LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
		    (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
		    qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
		    qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		printk(KERN_ERR PFX
		       "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_net_req_rsp_queues failed.\n",
		       qdev->ndev->name);
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_buffer_queues failed.\n",
		       qdev->ndev->name);
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	if (ql_alloc_large_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. */
	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;

err_free_list:
	ql_free_send_free_list(qdev);
err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);

	return -ENOMEM;
}
static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}
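
/*
 * Copy the NVRAM-derived buflet, TCP/IP hash table, NCB and DRB
 * parameters into the chip's local RAM.  The DDR RAM semaphore is taken
 * around the writes so the two network functions cannot collide.
 */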
static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
	    (void __iomem *)qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}
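
/*
 * One-time hardware bring-up: set up MII, take the PHY out of reset,
 * point the chip at the request/response rings and buffer queues,
 * program the MAC address, then signal Configuration Complete and wait
 * for the firmware to acknowledge it.
 */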
static int ql_adapter_initialize(struct ql3_adapter *qdev)
{
	u32 value;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
	    (void __iomem *)port_regs;
	u32 delay = 10;
	int status = 0;

	if (ql_mii_setup(qdev))
		return -1;

	/* Bring the PHY out of reset */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));

	qdev->port_link_state = LS_DOWN;
	netif_carrier_off(qdev->ndev);

	/* V2 chip fix for ARS-39168. */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_SDE |
			     (ISP_SERIAL_PORT_IF_SDE << 16)));

	/* Request Queue Registers */
	*((u32 *) (qdev->preq_consumer_index)) = 0;
	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
	qdev->req_producer_index = 0;

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrHigh,
			   qdev->req_consumer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrLow,
			   qdev->req_consumer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrHigh,
			   MS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrLow,
			   LS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);

	/* Response Queue Registers */
	*((u16 *) (qdev->prsp_producer_index)) = 0;
	qdev->rsp_consumer_index = 0;
	qdev->rsp_current = qdev->rsp_q_virt_addr;

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrHigh,
			   qdev->rsp_producer_index_phy_addr_high);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrLow,
			   qdev->rsp_producer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrHigh,
			   MS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrLow,
			   LS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);

	/* Large Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrHigh,
			   MS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,
			   qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
	    (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized. If it has, then
	 * we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		if (qdev->mac_index)
			ql_write_page0_reg(qdev,
					   &port_regs->mac1MaxFrameLengthReg,
					   qdev->max_frame_size);
		else
			ql_write_page0_reg(qdev,
					   &port_regs->mac0MaxFrameLengthReg,
					   qdev->max_frame_size);

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				     * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		status = -1;
		goto out;
	}

	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));

	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		msleep(500);
	} while (--delay);

	if (delay == 0) {
		printk(KERN_ERR PFX
		       "%s: Hw Initialization timeout.\n", qdev->ndev->name);
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value =
		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value =
		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
		     PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}
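
/*
 * Soft-reset strategy: request a soft reset (SR) and poll until the
 * firmware clears the bit.  If that times out, escalate to a Force Soft
 * Reset (FSR) and poll again.  The Network Reset Interrupt (RI) bit is
 * cleared explicitly, since it can remain set after the reset completes.
 */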
/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	int status = 0;
	u32 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	printk(KERN_DEBUG PFX
	       "%s: Issue soft reset to chip.\n",
	       qdev->ndev->name);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/* Wait up to 5 seconds for the reset to complete. */
	printk(KERN_DEBUG PFX
	       "%s: Waiting for reset to complete.\n",
	       qdev->ndev->name);

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value =
		    ql_read_common_reg(qdev,
				       &port_regs->CommonRegs.ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		printk(KERN_DEBUG PFX
		       "ql_adapter_reset: clearing RI after reset.\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done
		 */
		max_wait_time = 5;
		do {
			value =
			    ql_read_common_reg(qdev,
					       &port_regs->CommonRegs.
					       ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0) {
				break;
			}
			ssleep(1);
		} while ((--max_wait_time));
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}
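
/*
 * Decode ispControlStatus to discover which PCI function this port is,
 * then select the per-function IOCB opcodes, mailbox bit mask and PHY
 * address.  The SM0/SM1 media bits in portStatus decide whether the
 * link is optical or copper.
 */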
static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		printk(KERN_DEBUG PFX
		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
		       qdev->ndev->name, value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.numPorts;
}
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	printk(KERN_INFO PFX
	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
	       DRV_NAME, qdev->index, qdev->chip_rev_id,
	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
	       qdev->pci_slot);
	printk(KERN_INFO PFX
	       "%s Interface.\n",
	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	printk(KERN_INFO PFX
	       "Bus interface is %s %s.\n",
	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
	       ((qdev->pci_x) ? "PCI-X" : "PCI"));

	printk(KERN_INFO PFX
	       "mem IO base address adjusted = 0x%p\n",
	       qdev->mem_map_registers);
	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);

	if (netif_msg_probe(qdev))
		printk(KERN_INFO PFX
		       "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		       ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
		       ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
		       ndev->dev_addr[5]);
}
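
/*
 * Teardown order matters here: stop the queue and mark the carrier off,
 * disable and free the interrupt (and MSI, if it was enabled), kill the
 * adapter timer and polling, then optionally soft-reset the chip under
 * hw_lock before releasing all memory resources.
 */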
static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	netif_poll_disable(ndev);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			if ((soft_reset = ql_adapter_reset(qdev))) {
				printk(KERN_ERR PFX
				       "%s: ql_adapter_reset(%d) FAILED!\n",
				       ndev->name, qdev->index);
			}
			printk(KERN_ERR PFX
			       "%s: Releasing driver lock via chip reset.\n",
			       ndev->name);
		} else {
			printk(KERN_ERR PFX
			       "%s: Could not acquire driver lock to do "
			       "reset!\n", ndev->name);
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		printk(KERN_ERR PFX
		       "%s Unable to allocate buffers.\n", ndev->name);
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			printk(KERN_ERR PFX
			       "%s: User requested MSI, but MSI failed to "
			       "initialize. Continuing without MSI.\n",
			       qdev->ndev->name);
			qdev->msi = 0;
		} else {
			printk(KERN_INFO PFX "%s: MSI Enabled...\n",
			       qdev->ndev->name);
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~SA_SHIRQ;
		}
	}

	if ((err = request_irq(qdev->pdev->irq,
			       ql3xxx_isr,
			       irq_flags, ndev->name, ndev))) {
		printk(KERN_ERR PFX
		       "%s: Failed to reserve interrupt %d already in use.\n",
		       ndev->name, qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if ((err = ql_wait_for_drvr_lock(qdev))) {
		if ((err = ql_adapter_initialize(qdev))) {
			printk(KERN_ERR PFX
			       "%s: Unable to initialize adapter.\n",
			       ndev->name);
			goto err_init;
		}
		printk(KERN_ERR PFX
		       "%s: Releasing driver lock.\n", ndev->name);
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		printk(KERN_ERR PFX
		       "%s: Could not acquire driver lock.\n",
		       ndev->name);
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	netif_poll_enable(ndev);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n",
		       qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Driver up/down cycle failed, "
		       "closing device\n", qdev->ndev->name);
		dev_close(qdev->ndev);
		return -1;
	}
	return 0;
}
static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}
static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return (ql_adapter_up(qdev));
}
static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)dev->priv;
	return &qdev->stats;
}
static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
	if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
		printk(KERN_ERR PFX
		       "%s: mtu size of %d is not valid. Use exactly %d or "
		       "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
		       JUMBO_MTU_SIZE);
		return -EINVAL;
	}

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	ndev->mtu = new_mtu;
	return ql_cycle_adapter(qdev, QL_DO_RESET);
}
static void ql3xxx_set_multicast_list(struct net_device *ndev)
{
	/*
	 * We are manually parsing the list in the net_device structure.
	 */
	return;
}
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24) |
			    (ndev->dev_addr[3] << 16) |
			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}
static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
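
/*
 * Reset worker: reclaim any transmit skbs that were in flight when the
 * reset was requested, clear the Network Reset Interrupt, and wait for
 * the soft reset to finish.  If the chip never comes back, fall through
 * to a full down/up cycle with a fresh reset.
 */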
static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	unsigned long hw_flags;

	if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;
			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				printk(KERN_DEBUG PFX
				       "%s: Freeing lost SKB.\n",
				       qdev->ndev->name);
				pci_unmap_single(qdev->pdev,
					pci_unmap_addr(&tx_cb->map[0], mapaddr),
					pci_unmap_len(&tx_cb->map[0], maplen),
					PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
					       pci_unmap_addr(&tx_cb->map[j], mapaddr),
					       pci_unmap_len(&tx_cb->map[j], maplen),
					       PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		printk(KERN_ERR PFX
		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the Soft Reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				printk(KERN_DEBUG PFX
				       "%s: reset completed.\n",
				       qdev->ndev->name);
				break;
			}

			if (value & ISP_CONTROL_RI) {
				printk(KERN_DEBUG PFX
				       "%s: clearing NRI after reset.\n",
				       qdev->ndev->name);
				ql_write_common_reg(qdev,
						    &port_regs->
						    CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI <<
						      16) | ISP_CONTROL_RI));
			}

			ssleep(1);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {

			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			printk(KERN_ERR PFX
			       "%s: Timed out waiting for reset to "
			       "complete.\n", ndev->name);
			printk(KERN_ERR PFX
			       "%s: Do a reset.\n", ndev->name);
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}
static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}
static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}
static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		printk(KERN_DEBUG PFX
		       "%s: Reset in progress.\n",
		       qdev->ndev->name);
		goto end;
	}

	ql_link_state_machine(qdev);

	/* Restart timer on one-second interval. */
end:
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}
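
/*
 * PCI probe: enable the device, pick a 64- or 32-bit DMA mask, map BAR 1,
 * wire up the net_device entry points, pull the MAC address out of NVRAM,
 * and register the interface.  The link is reported down until the
 * adapter timer's state machine brings it up.
 */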
static int __devinit ql3xxx_probe(struct pci_dev *pdev,
				  const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found = 0;
	int pci_using_dac, err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev)
		goto err_out_free_regions;

	SET_MODULE_OWNER(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);

	qdev->mem_map_registers =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(qdev->pdev, 1));
	if (!qdev->mem_map_registers) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->open = ql3xxx_open;
	ndev->hard_start_xmit = ql3xxx_send;
	ndev->stop = ql3xxx_close;
	ndev->get_stats = ql3xxx_get_stats;
	ndev->change_mtu = ql3xxx_change_mtu;
	ndev->set_multicast_list = ql3xxx_set_multicast_list;
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->set_mac_address = ql3xxx_set_mac_address;
	ndev->tx_timeout = ql3xxx_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;

	ndev->poll = &ql_poll;
	ndev->weight = 64;

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		printk(KERN_ALERT PFX
		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
		       qdev->index);
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
		       ETH_ALEN);
	} else {
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
		       ETH_ALEN);
	}
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Turn off support for multicasting */
	ndev->flags &= ~IFF_MULTICAST;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value. We do this to handle
	 * jumbo frames.
	 */
	if (qdev->pci_x)
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
		       DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}
static void __devexit ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
}
static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = __devexit_p(ql3xxx_remove),
};

static int __init ql3xxx_init_module(void)
{
	return pci_register_driver(&ql3xxx_driver);
}

static void __exit ql3xxx_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}

module_init(ql3xxx_init_module);
module_exit(ql3xxx_exit);