/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>

#include <linux/of_device.h>

#define DRV_MODULE_NAME		"niu"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"Nov 14, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#ifndef DMA_44BIT_MASK
#define DMA_44BIT_MASK	0x00000fffffffffffULL
#endif
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif
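/* Illustrative note (not part of the original source): readq/writeq
 * above synthesize one 64-bit MMIO access from two 32-bit PIO
 * operations, low word first, e.g.
 *
 *	u64 v = readq(np->regs + 0x100);   // reads 0x100, then 0x104
 *	writeq(v, np->regs + 0x100);       // writes low half, then high
 *
 * The two halves are not atomic with respect to the hardware, so
 * callers are expected to serialize register access themselves.
 */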
static struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))
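/* Sketch (added for clarity, not in the original): each block-specific
 * accessor folds in the base offset of one register block, so e.g.
 *
 *	u64 v = nr64_pcs(PCS_MII_STAT);
 *
 * expands to readq(np->regs + np->pcs_off + PCS_MII_STAT), with the
 * per-block offsets (ipp_off, pcs_off, xpcs_off) discovered at probe
 * time.
 */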
#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");
#define niudbg(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_DEBUG PFX f, ## a); \
} while (0)

#define niuinfo(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_INFO PFX f, ## a); \
} while (0)

#define niuwarn(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_WARNING PFX f, ## a); \
} while (0)
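/* Usage sketch (illustrative only):
 *
 *	niudbg(PROBE, "%s: probing port %u\n", np->dev->name, np->port);
 *
 * prints only when NETIF_MSG_PROBE is set in np->msg_enable, which is
 * typically seeded from the "debug" module parameter, with
 * NIU_MSG_DEFAULT supplying the fallback bits.
 */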
#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)
static int serdes_init_10g_serdes(struct niu *np);
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}
static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}
static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})
static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
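/* Note (added for clarity): the ({ ... }) statement-expression wrappers
 * above exist so BUILD_BUG_ON() can reject a non-positive LIMIT or a
 * negative DELAY at compile time when the arguments are constants; the
 * double-underscore helpers do the actual polling.  Hypothetical call:
 *
 *	err = niu_set_and_wait_clear(np, SOME_RST_REG, SOME_RST_BIT,
 *				     1000, 10, "SOME_RST_REG");
 *
 * (SOME_RST_REG/SOME_RST_BIT are placeholder names, not real registers.)
 */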
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}
static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}
static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}
static int niu_enable_interrupts(struct niu *np, int on)
{
	int i, err;

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return err;
}
static u32 phy_encode(u32 type, int port)
{
	return (type << (port * 2));
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
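/* Sketch of the encoding (inferred from the helpers above): the
 * parent's port_phy word carries one 2-bit type code per port, so for
 * example:
 *
 *	u32 port_phy = phy_encode(PORT_TYPE_10G, 1);	// occupies bits 3:2
 *	u32 type     = phy_decode(port_phy, 1);		// PORT_TYPE_10G
 */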
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}
static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
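/* Note (added for clarity, an interpretation rather than source text):
 * the mdio_*() helpers appear to issue clause-45 style frames (an
 * address cycle naming an MMD "dev", then a read or write cycle),
 * while mii_*() issue plain clause-22 frames; both poll the MIF
 * turnaround bit through mdio_wait().  Illustrative, hypothetical read:
 *
 *	int val = mdio_read(np, np->port, NIU_ESR2_DEV_ADDR, some_reg);
 */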
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}
/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}
static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_1g_serdes: "
			"mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_1g_serdes: "
			"mdio write to ESR2_TI_PLL_STS_L failed", np->port);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}

	return 0;
}
static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_10g_serdes: "
			"mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_10g_serdes: "
			"mdio write to ESR2_TI_PLL_STS_L failed", np->port);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info(PFX "NIU Port %u signal bits [%08x] are not "
			"[%08x] for 10G...trying 1G\n",
			np->port, (int) (sig & mask), (int) val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			dev_err(np->device, PFX "Port %u 10G/1G SERDES "
				"Link Failed \n", np->port);
			return -ENODEV;
		}
	}
	return 0;
}
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}
static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}
static int esr_reset(struct niu *np)
{
	u32 reset;
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		dev_err(np->device, PFX "Port %u ESR_RESET "
			"did not clear [%08x]\n",
			np->port, reset);
		return -ENODEV;
	}

	return 0;
}
static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}
static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}
static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}

	return 0;
}
static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}
static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}
static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = -EINVAL;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		common = adv & lpa;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		estat = err;

		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}
static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u PHY will not reset "
			"(bmcr=%04x)\n", np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}
/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}
static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}
static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}
static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}
static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}
static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
		np->port, err);

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info(PFX "Port %u cable not connected "
				"or bad cable.\n", np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info(PFX "Port %u optical module is bad "
				"or missing.\n", np->port);
		}
	}

	return 0;
}
static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}
static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}
static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}
static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}
static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u MII would not reset, "
			"bmcr[%04x]\n", np->port, err);
		return -ENODEV;
	}

	return 0;
}
static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}
static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	/* XXX configurable XXX */
	/* XXX for now don't advertise half-duplex or asym pause... XXX */
	adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
	if (bmsr & BMSR_10FULL)
		adv |= ADVERTISE_10FULL;
	if (bmsr & BMSR_100FULL)
		adv |= ADVERTISE_100FULL;
	err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);

	return 0;
}
static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}
static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}
static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);
static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
			dev->name,
			(lp->active_speed == SPEED_10000 ?
			 "10Gb/sec" :
			 (lp->active_speed == SPEED_1000 ?
			  "1Gb/sec" :
			  (lp->active_speed == SPEED_100 ?
			   "100Mbit/sec" : "10Mbit/sec"))),
			(lp->active_duplex == DUPLEX_FULL ?
			 "full" : "half"));

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		niuwarn(LINK, "%s: Link is down\n", dev->name);
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}
static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}
static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		err = 0;
	return err;
}
static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}
static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}
static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}
*np
, int *link_up_p
)
2020 unsigned long flags
;
2023 int phy_present_prev
;
2025 spin_lock_irqsave(&np
->lock
, flags
);
2027 if (np
->link_config
.loopback_mode
== LOOPBACK_DISABLED
) {
2028 phy_present_prev
= (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) ?
2030 phy_present
= niu_10g_phy_present(np
);
2031 if (phy_present
!= phy_present_prev
) {
2034 np
->flags
|= NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2035 if (np
->phy_ops
->xcvr_init
)
2036 err
= np
->phy_ops
->xcvr_init(np
);
2039 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2042 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2044 niuwarn(LINK
, "%s: Hotplug PHY Removed\n",
2048 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
)
2049 err
= link_status_10g_bcm8706(np
, link_up_p
);
2052 spin_unlock_irqrestore(&np
->lock
, flags
);
static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = -EINVAL;
	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		goto out;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		common = adv & lpa;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		estat = err;

		link_up = 1;
		if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
			current_speed = SPEED_1000;
			if (estat & ESTATUS_1000_TFULL)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		} else {
			if (common & ADVERTISE_100BASE4) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_100FULL) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_100HALF) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_10FULL) {
				current_speed = SPEED_10;
				current_duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_10HALF) {
				current_speed = SPEED_10;
				current_duplex = DUPLEX_HALF;
			} else
				link_up = 0;
		}
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}
static int niu_link_status(struct niu *np, int *link_up_p)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->link_status)
		err = ops->link_status(np, link_up_p);

	return err;
}
static void niu_timer(unsigned long __opaque)
{
	struct niu *np = (struct niu *) __opaque;
	unsigned long off;
	int err, link_up;

	err = niu_link_status(np, &link_up);
	if (!err)
		niu_link_status_common(np, link_up);

	if (netif_carrier_ok(np->dev))
		off = 5 * HZ;
	else
		off = 1 * HZ;
	np->timer.expires = jiffies + off;

	add_timer(&np->timer);
}
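/* Design note (added for clarity): link state is polled rather than
 * interrupt driven; the timer above re-arms itself on every tick,
 * using a longer interval while the carrier is up and a shorter one
 * while it is down so that link recovery is noticed quickly.
 */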
static const struct niu_phy_ops phy_ops_10g_serdes = {
	.serdes_init		= serdes_init_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
	.serdes_init		= serdes_init_niu_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
	.serdes_init		= serdes_init_niu_1g_serdes,
	.link_status		= link_status_1g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_rgmii = {
	.xcvr_init		= xcvr_init_1g_rgmii,
	.link_status		= link_status_1g_rgmii,
};

static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_10g_copper = {
	.serdes_init		= serdes_init_10g,
	.link_status		= link_status_10g, /* XXX */
};

static const struct niu_phy_ops phy_ops_1g_fiber = {
	.serdes_init		= serdes_init_1g,
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};

static const struct niu_phy_ops phy_ops_1g_copper = {
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};
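/* Note (added for clarity): one of the niu_phy_ops bundles above is
 * selected per port by niu_determine_phy_disposition() below; the rest
 * of the driver then drives the PHY generically through
 * niu_serdes_init(), niu_xcvr_init() and niu_link_status() without
 * switching on the PHY type again.
 */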
struct niu_phy_template {
	const struct niu_phy_ops	*ops;
	u32				phy_addr_base;
};

static const struct niu_phy_template phy_template_niu_10g_fiber = {
	.ops		= &phy_ops_10g_fiber_niu,
	.phy_addr_base	= 16,
};

static const struct niu_phy_template phy_template_niu_10g_serdes = {
	.ops		= &phy_ops_10g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_niu_1g_serdes = {
	.ops		= &phy_ops_1g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_fiber = {
	.ops		= &phy_ops_10g_fiber,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
	.ops		= &phy_ops_10g_fiber_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_copper = {
	.ops		= &phy_ops_10g_copper,
	.phy_addr_base	= 10,
};

static const struct niu_phy_template phy_template_1g_fiber = {
	.ops		= &phy_ops_1g_fiber,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_copper = {
	.ops		= &phy_ops_1g_copper,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_rgmii = {
	.ops		= &phy_ops_1g_rgmii,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_serdes = {
	.ops		= &phy_ops_10g_serdes,
	.phy_addr_base	= 0,
};
static int niu_atca_port_num[4] = {
	0, 0, 11, 10
};
static int serdes_init_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val;

	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	/* Initialize PLL for 10G */
	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		err = serdes_init_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n",
				np->port);
			return -ENODEV;
		}
	}

	return 0;
}
*np
)
2416 struct niu_parent
*parent
= np
->parent
;
2417 u8 plat_type
= parent
->plat_type
;
2418 const struct niu_phy_template
*tp
;
2419 u32 phy_addr_off
= 0;
2421 if (plat_type
== PLAT_TYPE_NIU
) {
2425 NIU_FLAGS_XCVR_SERDES
)) {
2426 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
2428 tp
= &phy_template_niu_10g_serdes
;
2430 case NIU_FLAGS_XCVR_SERDES
:
2432 tp
= &phy_template_niu_1g_serdes
;
2434 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
2437 tp
= &phy_template_niu_10g_fiber
;
2438 phy_addr_off
+= np
->port
;
2445 NIU_FLAGS_XCVR_SERDES
)) {
2448 tp
= &phy_template_1g_copper
;
2449 if (plat_type
== PLAT_TYPE_VF_P0
)
2451 else if (plat_type
== PLAT_TYPE_VF_P1
)
2454 phy_addr_off
+= (np
->port
^ 0x3);
2459 tp
= &phy_template_1g_copper
;
2462 case NIU_FLAGS_FIBER
:
2464 tp
= &phy_template_1g_fiber
;
2467 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
2469 tp
= &phy_template_10g_fiber
;
2470 if (plat_type
== PLAT_TYPE_VF_P0
||
2471 plat_type
== PLAT_TYPE_VF_P1
)
2473 phy_addr_off
+= np
->port
;
2474 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
2475 tp
= &phy_template_10g_fiber_hotplug
;
2483 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
2484 case NIU_FLAGS_XCVR_SERDES
| NIU_FLAGS_FIBER
:
2485 case NIU_FLAGS_XCVR_SERDES
:
2489 tp
= &phy_template_10g_serdes
;
2493 tp
= &phy_template_1g_rgmii
;
2499 phy_addr_off
= niu_atca_port_num
[np
->port
];
2507 np
->phy_ops
= tp
->ops
;
2508 np
->phy_addr
= tp
->phy_addr_base
+ phy_addr_off
;
static int niu_init_link(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, ignore;

	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_xcvr_init(np);
		if (err)
			return err;
		msleep(200);
	}
	err = niu_serdes_init(np);
	if (err)
		return err;
	msleep(200);
	err = niu_xcvr_init(np);
	if (!err)
		niu_link_status(np, &ignore);
	return 0;
}
static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ADDR0, reg0);
		nw64_mac(XMAC_ADDR1, reg1);
		nw64_mac(XMAC_ADDR2, reg2);
	} else {
		nw64_mac(BMAC_ADDR0, reg0);
		nw64_mac(BMAC_ADDR1, reg1);
		nw64_mac(BMAC_ADDR2, reg2);
	}
}
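/* Worked example (illustrative): for MAC address 00:14:4f:6c:11:22 the
 * helper above writes reg2 = 0x0014, reg1 = 0x4f6c, reg0 = 0x1122, so
 * the ADDR2 register holds the most significant 16 bits of the address.
 */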
static int niu_num_alt_addr(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return XMAC_NUM_ALT_ADDR;
	else
		return BMAC_NUM_ALT_ADDR;
}

static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
	} else {
		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
	}

	return 0;
}
static int niu_enable_alt_mac(struct niu *np, int index, int on)
{
	unsigned long reg;
	u64 val, mask;

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		reg = XMAC_ADDR_CMPEN;
		mask = 1 << index;
	} else {
		reg = BMAC_ADDR_CMPEN;
		mask = 1 << (index + 1);
	}

	val = nr64_mac(reg);
	if (on)
		val |= mask;
	else
		val &= ~mask;
	nw64_mac(reg, val);

	return 0;
}
static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
				   int num, int mac_pref)
{
	u64 val = nr64_mac(reg);
	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
	val |= num;
	if (mac_pref)
		val |= HOST_INFO_MPR;
	nw64_mac(reg, val);
}

static int __set_rdc_table_num(struct niu *np,
			       int xmac_index, int bmac_index,
			       int rdc_table_num, int mac_pref)
{
	unsigned long reg;

	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
		return -EINVAL;
	if (np->flags & NIU_FLAGS_XMAC)
		reg = XMAC_HOST_INFO(xmac_index);
	else
		reg = BMAC_HOST_INFO(bmac_index);
	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
	return 0;
}

static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
					 int mac_pref)
{
	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
}

static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
					   int mac_pref)
{
	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
}

static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
				     int table_num, int mac_pref)
{
	if (idx >= niu_num_alt_addr(np))
		return -EINVAL;
	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
}
static u64 vlan_entry_set_parity(u64 reg_val)
{
	u64 port01_mask;
	u64 port23_mask;

	port01_mask = 0x00ff;
	port23_mask = 0xff00;

	if (hweight64(reg_val & port01_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY0;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY0;

	if (hweight64(reg_val & port23_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY1;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY1;

	return reg_val;
}
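/* Sketch of the layout (inferred from the masks above, not source
 * text): the low 16 bits of a VLAN table entry appear to hold one
 * per-port field, with the 0x00ff byte covering ports 0-1 and the
 * 0xff00 byte ports 2-3; each byte with an odd population count gets
 * its parity bit set so the hardware can detect corrupted entries.
 */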
static void vlan_tbl_write(struct niu *np, unsigned long index,
			   int port, int vpr, int rdc_table)
{
	u64 reg_val = nr64(ENET_VLAN_TBL(index));

	reg_val &= ~((ENET_VLAN_TBL_VPR |
		      ENET_VLAN_TBL_VLANRDCTBLN) <<
		     ENET_VLAN_TBL_SHIFT(port));
	if (vpr)
		reg_val |= (ENET_VLAN_TBL_VPR <<
			    ENET_VLAN_TBL_SHIFT(port));
	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));

	reg_val = vlan_entry_set_parity(reg_val);

	nw64(ENET_VLAN_TBL(index), reg_val);
}

static void vlan_tbl_clear(struct niu *np)
{
	unsigned long i;

	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
		nw64(ENET_VLAN_TBL(i), 0);
}
static int tcam_wait_bit(struct niu *np, u64 bit)
{
	int limit = 1000;

	while (--limit > 0) {
		if (nr64(TCAM_CTL) & bit)
			break;
		udelay(1);
	}
	if (limit <= 0)
		return -ENODEV;

	return 0;
}

static int tcam_flush(struct niu *np, int index)
{
	nw64(TCAM_KEY_0, 0x00);
	nw64(TCAM_KEY_MASK_0, 0xff);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
#if 0
static int tcam_read(struct niu *np, int index,
		     u64 *key, u64 *mask)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err) {
		key[0] = nr64(TCAM_KEY_0);
		key[1] = nr64(TCAM_KEY_1);
		key[2] = nr64(TCAM_KEY_2);
		key[3] = nr64(TCAM_KEY_3);
		mask[0] = nr64(TCAM_KEY_MASK_0);
		mask[1] = nr64(TCAM_KEY_MASK_1);
		mask[2] = nr64(TCAM_KEY_MASK_2);
		mask[3] = nr64(TCAM_KEY_MASK_3);
	}
	return err;
}
#endif
static int tcam_write(struct niu *np, int index,
		      u64 *key, u64 *mask)
{
	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
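/* TCAM access protocol as used above (sketch, added for clarity):
 * stage the key and mask through the four TCAM_KEY_n / TCAM_KEY_MASK_n
 * staging registers, kick the operation by writing an RWC opcode plus
 * the entry index to TCAM_CTL, then poll TCAM_CTL_STAT for completion
 * via tcam_wait_bit().
 */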
static int tcam_assoc_read(struct niu *np, int index, u64 *data)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err)
		*data = nr64(TCAM_KEY_1);

	return err;
}

static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
{
	nw64(TCAM_KEY_1, assoc_data);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
static void tcam_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_TCAM_DIS;
	else
		val |= FFLP_CFG_1_TCAM_DIS;
	nw64(FFLP_CFG_1, val);
}
static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~(FFLP_CFG_1_FFLPINITDONE |
		 FFLP_CFG_1_CAMLAT |
		 FFLP_CFG_1_CAMRATIO);
	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}
static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
				      int on)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2)
		return -EINVAL;

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
	val = nr64(reg);
	if (on)
		val |= L2_CLS_VLD;
	else
		val &= ~L2_CLS_VLD;
	nw64(reg, val);

	return 0;
}

#if 0
static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
				   u64 ether_type)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2 ||
	    (ether_type & ~(u64)0xffff) != 0)
		return -EINVAL;

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
	val = nr64(reg);
	val &= ~L2_CLS_ETYPE;
	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
	nw64(reg, val);

	return 0;
}
#endif
*np
, unsigned long class,
2861 if (class < CLASS_CODE_USER_PROG1
||
2862 class > CLASS_CODE_USER_PROG4
)
2865 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
2868 val
|= L3_CLS_VALID
;
2870 val
&= ~L3_CLS_VALID
;
2877 static int tcam_user_ip_class_set(struct niu
*np
, unsigned long class,
2878 int ipv6
, u64 protocol_id
,
2879 u64 tos_mask
, u64 tos_val
)
2884 if (class < CLASS_CODE_USER_PROG1
||
2885 class > CLASS_CODE_USER_PROG4
||
2886 (protocol_id
& ~(u64
)0xff) != 0 ||
2887 (tos_mask
& ~(u64
)0xff) != 0 ||
2888 (tos_val
& ~(u64
)0xff) != 0)
2891 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
2893 val
&= ~(L3_CLS_IPVER
| L3_CLS_PID
|
2894 L3_CLS_TOSMASK
| L3_CLS_TOS
);
2896 val
|= L3_CLS_IPVER
;
2897 val
|= (protocol_id
<< L3_CLS_PID_SHIFT
);
2898 val
|= (tos_mask
<< L3_CLS_TOSMASK_SHIFT
);
2899 val
|= (tos_val
<< L3_CLS_TOS_SHIFT
);
2906 static int tcam_early_init(struct niu
*np
)
2912 tcam_set_lat_and_ratio(np
,
2913 DEFAULT_TCAM_LATENCY
,
2914 DEFAULT_TCAM_ACCESS_RATIO
);
2915 for (i
= CLASS_CODE_ETHERTYPE1
; i
<= CLASS_CODE_ETHERTYPE2
; i
++) {
2916 err
= tcam_user_eth_class_enable(np
, i
, 0);
2920 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_USER_PROG4
; i
++) {
2921 err
= tcam_user_ip_class_enable(np
, i
, 0);
2929 static int tcam_flush_all(struct niu
*np
)
2933 for (i
= 0; i
< np
->parent
->tcam_num_entries
; i
++) {
2934 int err
= tcam_flush(np
, i
);
static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
{
	return ((u64)index | (num_entries == 1 ?
			      HASH_TBL_ADDR_AUTOINC : 0));
}

#if 0
static int hash_read(struct niu *np, unsigned long partition,
		     unsigned long index, unsigned long num_entries,
		     u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + num_entries > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		data[i] = nr64(HASH_TBL_DATA(partition));

	return 0;
}
#endif

static int hash_write(struct niu *np, unsigned long partition,
		      unsigned long index, unsigned long num_entries,
		      u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + (num_entries * 8) > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		nw64(HASH_TBL_DATA(partition), data[i]);

	return 0;
}
2985 static void fflp_reset(struct niu
*np
)
2989 nw64(FFLP_CFG_1
, FFLP_CFG_1_PIO_FIO_RST
);
2991 nw64(FFLP_CFG_1
, 0);
2993 val
= FFLP_CFG_1_FCRAMOUTDR_NORMAL
| FFLP_CFG_1_FFLPINITDONE
;
2994 nw64(FFLP_CFG_1
, val
);
static void fflp_set_timings(struct niu *np)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~FFLP_CFG_1_FFLPINITDONE;
	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

	val = nr64(FCRAM_REF_TMR);
	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
	nw64(FCRAM_REF_TMR, val);
}

static int fflp_set_partition(struct niu *np, u64 partition,
			      u64 mask, u64 base, int enable)
{
	unsigned long reg;
	u64 val;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    (mask & ~(u64)0x1f) != 0 ||
	    (base & ~(u64)0x1f) != 0)
		return -EINVAL;

	reg = FLW_PRT_SEL(partition);

	val = nr64(reg);
	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
	if (enable)
		val |= FLW_PRT_SEL_EXT;
	nw64(reg, val);

	return 0;
}

static int fflp_disable_all_partitions(struct niu *np)
{
	unsigned long i;

	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
		int err = fflp_set_partition(np, 0, 0, 0, 0);
		if (err)
			return err;
	}
	return 0;
}

static void fflp_llcsnap_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val |= FFLP_CFG_1_LLCSNAP;
	else
		val &= ~FFLP_CFG_1_LLCSNAP;
	nw64(FFLP_CFG_1, val);
}

static void fflp_errors_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_ERRORDIS;
	else
		val |= FFLP_CFG_1_ERRORDIS;
	nw64(FFLP_CFG_1, val);
}

static int fflp_hash_clear(struct niu *np)
{
	struct fcram_hash_ipv4 ent;
	unsigned long i;

	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
	memset(&ent, 0, sizeof(ent));
	ent.header = HASH_HEADER_EXT;

	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
		if (err)
			return err;
	}
	return 0;
}
static int fflp_early_init(struct niu *np)
{
	struct niu_parent *parent;
	unsigned long flags;
	int err;

	niu_lock_parent(np, flags);

	err = 0;
	parent = np->parent;
	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
		niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
		       np->port);
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			fflp_reset(np);
			fflp_set_timings(np);
			err = fflp_disable_all_partitions(np);
			if (err) {
				niudbg(PROBE, "fflp_disable_all_partitions "
				       "failed, err=%d\n", err);
				goto out;
			}
		}

		err = tcam_early_init(np);
		if (err) {
			niudbg(PROBE, "tcam_early_init failed, err=%d\n",
			       err);
			goto out;
		}
		fflp_llcsnap_enable(np, 1);
		fflp_errors_enable(np, 0);
		nw64(H1POLY, 0);
		nw64(H2POLY, 0);

		err = tcam_flush_all(np);
		if (err) {
			niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
			       err);
			goto out;
		}
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = fflp_hash_clear(np);
			if (err) {
				niudbg(PROBE, "fflp_hash_clear failed, "
				       "err=%d\n", err);
				goto out;
			}
		}

		vlan_tbl_clear(np);

		niudbg(PROBE, "fflp_early_init: Success\n");
		parent->flags |= PARENT_FLGS_CLS_HWINIT;
	}
out:
	niu_unlock_parent(np, flags);
	return err;
}

static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}

static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}
static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
			      u32 offset, u32 size)
{
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page = page;
	frag->page_offset = offset;
	frag->size = size;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;

	skb_shinfo(skb)->nr_frags = i + 1;
}

static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
{
	a >>= PAGE_SHIFT;
	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));

	return (a & (MAX_RBR_RING_SIZE - 1));
}
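
/* The hash above discards the in-page offset (a >> PAGE_SHIFT), folds
 * the high-order page-frame bits down with the XOR, and masks the
 * result to a table slot.  The final mask only indexes correctly
 * because MAX_RBR_RING_SIZE is a power of two.
 */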
static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
				    struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			break;
		}
	}

	return p;
}

static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
	unsigned int h = niu_hash_rxaddr(rp, base);

	page->index = base;
	page->mapping = (struct address_space *) rp->rxhash[h];
	rp->rxhash[h] = page;
}
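
/* The chains built here reuse struct page fields rather than a
 * separate node type: page->index holds the page's DMA base address
 * and page->mapping doubles as the singly-linked next pointer, which
 * is why niu_find_rxpage() walks &p->mapping and why both fields are
 * cleared before a page is released back to the allocator.
 */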
static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
			    gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);

	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		atomic_add(rp->rbr_blocks_per_page - 1,
			   &compound_head(page)->_count);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}

	return 0;
}

static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int index = rp->rbr_index;

	rp->rbr_pending++;
	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
		int err = niu_rbr_add_page(np, rp, mask, index);

		if (unlikely(err)) {
			rp->rbr_pending--;
			return;
		}

		rp->rbr_index += rp->rbr_blocks_per_page;
		BUG_ON(rp->rbr_index > rp->rbr_table_size);
		if (rp->rbr_index == rp->rbr_table_size)
			rp->rbr_index = 0;

		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
			rp->rbr_pending = 0;
		}
	}
}
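
/* Refill accounting is done in whole pages: a fresh page is mapped
 * only once rbr_pending reaches a page's worth of RBR blocks, and the
 * RBR_KICK write tells the hardware how many new buffers were
 * appended once rbr_kick_thresh of them have accumulated.
 */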
static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	int num_rcr = 0;

	while (1) {
		struct page *page, **link;
		u64 addr, val;
		u32 rcr_size;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);
		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];
		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			__free_page(page);
			rp->rbr_refill_pending++;
		}

		index = NEXT_RCR(rp, index);
		if (!(val & RCR_ENTRY_MULTI))
			break;

	}
	rp->rcr_index = index;

	return num_rcr;
}

static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	struct sk_buff *skb;
	int len, num_rcr;

	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb))
		return niu_rx_pkt_ignore(np, rp);

	num_rcr = 0;
	while (1) {
		struct page *page, **link;
		u32 rcr_size, append_size;
		u64 addr, val, off;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);

		len = (val & RCR_ENTRY_L2_LEN) >>
			RCR_ENTRY_L2_LEN_SHIFT;
		len -= ETH_FCS_LEN;

		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];

		off = addr & ~PAGE_MASK;
		append_size = rcr_size;
		if (num_rcr == 1) {
			int ptype;

			off += 2;
			append_size -= 2;

			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
			if ((ptype == RCR_PKT_TYPE_TCP ||
			     ptype == RCR_PKT_TYPE_UDP) &&
			    !(val & (RCR_ENTRY_NOPORT |
				     RCR_ENTRY_ERROR)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (!(val & RCR_ENTRY_MULTI))
			append_size = len - skb->len;

		niu_rx_skb_append(skb, page, off, append_size);
		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			rp->rbr_refill_pending++;
		} else
			get_page(page);

		index = NEXT_RCR(rp, index);
		if (!(val & RCR_ENTRY_MULTI))
			break;

	}
	rp->rcr_index = index;

	skb_reserve(skb, NET_IP_ALIGN);
	__pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));

	rp->rx_packets++;
	rp->rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, np->dev);
	netif_receive_skb(skb);

	return num_rcr;
}
static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int blocks_per_page = rp->rbr_blocks_per_page;
	int err, index = rp->rbr_index;

	err = 0;
	while (index < (rp->rbr_table_size - blocks_per_page)) {
		err = niu_rbr_add_page(np, rp, mask, index);
		if (err)
			break;

		index += blocks_per_page;
	}

	rp->rbr_index = index;

	return err;
}

static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
{
	int i;

	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
		struct page *page;

		page = rp->rxhash[i];
		while (page) {
			struct page *next = (struct page *) page->mapping;
			u64 base = page->index;

			np->ops->unmap_page(np->device, base, PAGE_SIZE,
					    DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;

			__free_page(page);

			page = next;
		}
	}

	for (i = 0; i < rp->rbr_table_size; i++)
		rp->rbr[i] = cpu_to_le32(0);
	rp->rbr_index = 0;
}
static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
{
	struct tx_buff_info *tb = &rp->tx_buffs[idx];
	struct sk_buff *skb = tb->skb;
	struct tx_pkt_hdr *tp;
	u64 tx_flags;
	int i, len;

	tp = (struct tx_pkt_hdr *) skb->data;
	tx_flags = le64_to_cpup(&tp->flags);

	rp->tx_packets++;
	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
			 ((tx_flags & TXHDR_PAD) / 2));

	len = skb_headlen(skb);
	np->ops->unmap_single(np->device, tb->mapping,
			      len, DMA_TO_DEVICE);

	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
		rp->mark_pending--;

	tb->skb = NULL;
	do {
		idx = NEXT_TX(rp, idx);
		len -= MAX_TX_DESC_LEN;
	} while (len > 0);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		tb = &rp->tx_buffs[idx];
		BUG_ON(tb->skb != NULL);
		np->ops->unmap_page(np->device, tb->mapping,
				    skb_shinfo(skb)->frags[i].size,
				    DMA_TO_DEVICE);
		idx = NEXT_TX(rp, idx);
	}

	dev_kfree_skb(skb);

	return idx;
}
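
/* Worked example of the descriptor walk above, assuming a 4KB
 * per-descriptor limit (the exact MAX_TX_DESC_LEN value lives in
 * niu.h): a 10000-byte linear area consumes three descriptors, so the
 * do/while advances idx three times before the fragment descriptors
 * are unmapped.
 */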
#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
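
/* The wakeup threshold is a quarter of the ring: a stopped queue is
 * only restarted once at least pending/4 descriptors are free again,
 * which keeps the queue from bouncing between stopped and awake on
 * every reclaimed packet.
 */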
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
	struct netdev_queue *txq;
	u16 pkt_cnt, tmp;
	int cons, index;
	u64 cs;

	index = (rp - np->tx_rings);
	txq = netdev_get_tx_queue(np->dev, index);

	cs = rp->tx_cs;
	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
		goto out;

	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);

	rp->last_pkt_cnt = tmp;

	cons = rp->cons;

	niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
	       np->dev->name, pkt_cnt, cons);

	while (pkt_cnt--)
		cons = release_tx_packet(np, rp, cons);

	rp->cons = cons;
	smp_mb();

out:
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
{
	int qlen, rcr_done = 0, work_done = 0;
	struct rxdma_mailbox *mbox = rp->mbox;
	u64 stat;

#if 1
	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
#else
	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
#endif
	mbox->rx_dma_ctl_stat = 0;
	mbox->rcrstat_a = 0;

	niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
	       np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);

	rcr_done = work_done = 0;
	qlen = min(qlen, budget);
	while (work_done < qlen) {
		rcr_done += niu_process_rx_pkt(np, rp);
		work_done++;
	}

	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
		unsigned int i;

		for (i = 0; i < rp->rbr_refill_pending; i++)
			niu_rbr_refill(np, rp, GFP_ATOMIC);
		rp->rbr_refill_pending = 0;
	}

	stat = (RX_DMA_CTL_STAT_MEX |
		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));

	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);

	return work_done;
}
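
/* The final RX_DMA_CTL_STAT write above acknowledges our progress to
 * the hardware: PKTREAD reports how many packets and PTRREAD how many
 * RCR entries were consumed, while RX_DMA_CTL_STAT_MEX re-arms the
 * mailbox update for the next interrupt.
 */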
static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
{
	u64 v0 = lp->v0;
	u32 tx_vec = (v0 >> 32);
	u32 rx_vec = (v0 & 0xffffffff);
	int i, work_done = 0;

	niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
	       np->dev->name, (unsigned long long) v0);

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];
		if (tx_vec & (1 << rp->tx_channel))
			niu_tx_work(np, rp);
		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
	}

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		if (rx_vec & (1 << rp->rx_channel)) {
			int this_work_done;

			this_work_done = niu_rx_work(np, rp, budget);

			budget -= this_work_done;
			work_done += this_work_done;
		}
		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
	}

	return work_done;
}

static int niu_poll(struct napi_struct *napi, int budget)
{
	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
	struct niu *np = lp->np;
	int work_done;

	work_done = niu_poll_core(np, lp, budget);

	if (work_done < budget) {
		netif_rx_complete(np->dev, napi);
		niu_ldg_rearm(np, lp, 1);
	}
	return work_done;
}
static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
				  u64 stat)
{
	dev_err(np->device, PFX "%s: RX channel %u errors ( ",
		np->dev->name, rp->rx_channel);

	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
		printk("RBR_TMOUT ");
	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
		printk("RSP_CNT ");
	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
		printk("BYTE_EN_BUS ");
	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
		printk("RSP_DAT ");
	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
		printk("RCR_ACK ");
	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
		printk("RCR_SHA_PAR ");
	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
		printk("RBR_PRE_PAR ");
	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
		printk("CONFIG ");
	if (stat & RX_DMA_CTL_STAT_RCRINCON)
		printk("RCRINCON ");
	if (stat & RX_DMA_CTL_STAT_RCRFULL)
		printk("RCRFULL ");
	if (stat & RX_DMA_CTL_STAT_RBRFULL)
		printk("RBRFULL ");
	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
		printk("RBRLOGPAGE ");
	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
		printk("CFIGLOGPAGE ");
	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
		printk("DC_FIFO ");

	printk(")\n");
}

static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
{
	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	int err = 0;

	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
		    RX_DMA_CTL_STAT_PORT_FATAL))
		err = -EINVAL;

	if (err) {
		dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
			np->dev->name, rp->rx_channel,
			(unsigned long long) stat);

		niu_log_rxchan_errors(np, rp, stat);
	}

	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);

	return err;
}

static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
				  u64 cs)
{
	dev_err(np->device, PFX "%s: TX channel %u errors ( ",
		np->dev->name, rp->tx_channel);

	if (cs & TX_CS_MBOX_ERR)
		printk("MBOX ");
	if (cs & TX_CS_PKT_SIZE_ERR)
		printk("PKT_SIZE ");
	if (cs & TX_CS_TX_RING_OFLOW)
		printk("TX_RING_OFLOW ");
	if (cs & TX_CS_PREF_BUF_PAR_ERR)
		printk("PREF_BUF_PAR ");
	if (cs & TX_CS_NACK_PREF)
		printk("NACK_PREF ");
	if (cs & TX_CS_NACK_PKT_RD)
		printk("NACK_PKT_RD ");
	if (cs & TX_CS_CONF_PART_ERR)
		printk("CONF_PART ");
	if (cs & TX_CS_PKT_PRT_ERR)
		printk("PKT_PTR ");

	printk(")\n");
}

static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
{
	u64 cs, logh, logl;

	cs = nr64(TX_CS(rp->tx_channel));
	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));

	dev_err(np->device, PFX "%s: TX channel %u error, "
		"cs[%llx] logh[%llx] logl[%llx]\n",
		np->dev->name, rp->tx_channel,
		(unsigned long long) cs,
		(unsigned long long) logh,
		(unsigned long long) logl);

	niu_log_txchan_errors(np, rp, cs);

	return -ENODEV;
}
static int niu_mif_interrupt(struct niu *np)
{
	u64 mif_status = nr64(MIF_STATUS);
	int phy_mdint = 0;

	if (np->flags & NIU_FLAGS_XMAC) {
		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);

		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
			phy_mdint = 1;
	}

	dev_err(np->device, PFX "%s: MIF interrupt, "
		"stat[%llx] phy_mdint(%d)\n",
		np->dev->name, (unsigned long long) mif_status, phy_mdint);

	return -ENODEV;
}

static void niu_xmac_interrupt(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
	u64 val;

	val = nr64_mac(XTXMAC_STATUS);
	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
		mp->tx_fifo_errors++;
	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
		mp->tx_overflow_errors++;
	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
		mp->tx_max_pkt_size_errors++;
	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
		mp->tx_underflow_errors++;

	val = nr64_mac(XRXMAC_STATUS);
	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
		mp->rx_local_faults++;
	if (val & XRXMAC_STATUS_RFLT_DET)
		mp->rx_remote_faults++;
	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
	if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP)
		mp->rx_octets += RXMAC_BT_CNT_COUNT;
	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXUFLOW)
		mp->rx_underflows++;
	if (val & XRXMAC_STATUS_RXOFLOW)
		mp->rx_overflows++;

	val = nr64_mac(XMAC_FC_STAT);
	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
		mp->pause_off_state++;
	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
		mp->pause_on_state++;
	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
		mp->pause_received++;
}
static void niu_bmac_interrupt(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
	u64 val;

	val = nr64_mac(BTXMAC_STATUS);
	if (val & BTXMAC_STATUS_UNDERRUN)
		mp->tx_underflow_errors++;
	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
		mp->tx_max_pkt_size_errors++;
	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;

	val = nr64_mac(BRXMAC_STATUS);
	if (val & BRXMAC_STATUS_OVERFLOW)
		mp->rx_overflows++;
	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;

	val = nr64_mac(BMAC_CTRL_STATUS);
	if (val & BMAC_CTRL_STATUS_NOPAUSE)
		mp->pause_off_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE)
		mp->pause_on_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
		mp->pause_received++;
}

static int niu_mac_interrupt(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_xmac_interrupt(np);
	else
		niu_bmac_interrupt(np);

	return 0;
}
static void niu_log_device_error(struct niu *np, u64 stat)
{
	dev_err(np->device, PFX "%s: Core device errors ( ",
		np->dev->name);

	if (stat & SYS_ERR_MASK_META2)
		printk("META2 ");
	if (stat & SYS_ERR_MASK_META1)
		printk("META1 ");
	if (stat & SYS_ERR_MASK_PEU)
		printk("PEU ");
	if (stat & SYS_ERR_MASK_TXC)
		printk("TXC ");
	if (stat & SYS_ERR_MASK_RDMC)
		printk("RDMC ");
	if (stat & SYS_ERR_MASK_TDMC)
		printk("TDMC ");
	if (stat & SYS_ERR_MASK_ZCP)
		printk("ZCP ");
	if (stat & SYS_ERR_MASK_FFLP)
		printk("FFLP ");
	if (stat & SYS_ERR_MASK_IPP)
		printk("IPP ");
	if (stat & SYS_ERR_MASK_MAC)
		printk("MAC ");
	if (stat & SYS_ERR_MASK_SMX)
		printk("SMX ");

	printk(")\n");
}

static int niu_device_error(struct niu *np)
{
	u64 stat = nr64(SYS_ERR_STAT);

	dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
		np->dev->name, (unsigned long long) stat);

	niu_log_device_error(np, stat);

	return -ENODEV;
}
static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
				  u64 v0, u64 v1, u64 v2)
{
	int i, err = 0;

	lp->v0 = v0;
	lp->v1 = v1;
	lp->v2 = v2;

	if (v1 & 0x00000000ffffffffULL) {
		u32 rx_vec = (v1 & 0xffffffff);

		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			if (rx_vec & (1 << rp->rx_channel)) {
				int r = niu_rx_error(np, rp);
				if (r) {
					err = r;
				} else {
					if (!v0)
						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
						     RX_DMA_CTL_STAT_MEX);
				}
			}
		}
	}
	if (v1 & 0x7fffffff00000000ULL) {
		u32 tx_vec = (v1 >> 32) & 0x7fffffff;

		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			if (tx_vec & (1 << rp->tx_channel)) {
				int r = niu_tx_error(np, rp);
				if (r)
					err = r;
			}
		}
	}
	if ((v0 | v1) & 0x8000000000000000ULL) {
		int r = niu_mif_interrupt(np);
		if (r)
			err = r;
	}
	if (v2 & 0x00000000ffffffffULL) {
		int r = niu_mac_interrupt(np);
		if (r)
			err = r;
	}
	if (v2 & 0x7fffffff00000000ULL) {
		int r = niu_device_error(np);
		if (r)
			err = r;
	}

	if (err)
		niu_enable_interrupts(np, 0);

	return err;
}
static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
			    int ldn)
{
	struct rxdma_mailbox *mbox = rp->mbox;
	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);

	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
		      RX_DMA_CTL_STAT_RCRTO);
	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);

	niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
	       np->dev->name, (unsigned long long) stat);
}

static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
			    int ldn)
{
	rp->tx_cs = nr64(TX_CS(rp->tx_channel));

	niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
	       np->dev->name, (unsigned long long) rp->tx_cs);
}

static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
{
	struct niu_parent *parent = np->parent;
	u32 rx_vec, tx_vec;
	int i;

	tx_vec = (v0 >> 32);
	rx_vec = (v0 & 0xffffffff);

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];
		int ldn = LDN_RXDMA(rp->rx_channel);

		if (parent->ldg_map[ldn] != ldg)
			continue;

		nw64(LD_IM0(ldn), LD_IM0_MASK);
		if (rx_vec & (1 << rp->rx_channel))
			niu_rxchan_intr(np, rp, ldn);
	}

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];
		int ldn = LDN_TXDMA(rp->tx_channel);

		if (parent->ldg_map[ldn] != ldg)
			continue;

		nw64(LD_IM0(ldn), LD_IM0_MASK);
		if (tx_vec & (1 << rp->tx_channel))
			niu_txchan_intr(np, rp, ldn);
	}
}

static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
			      u64 v0, u64 v1, u64 v2)
{
	if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) {
		lp->v0 = v0;
		lp->v1 = v1;
		lp->v2 = v2;
		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
		__netif_rx_schedule(np->dev, &lp->napi);
	}
}
static irqreturn_t niu_interrupt(int irq, void *dev_id)
{
	struct niu_ldg *lp = dev_id;
	struct niu *np = lp->np;
	int ldg = lp->ldg_num;
	unsigned long flags;
	u64 v0, v1, v2;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
		       lp, ldg);

	spin_lock_irqsave(&np->lock, flags);

	v0 = nr64(LDSV0(ldg));
	v1 = nr64(LDSV1(ldg));
	v2 = nr64(LDSV2(ldg));

	if (netif_msg_intr(np))
		printk("v0[%llx] v1[%llx] v2[%llx]\n",
		       (unsigned long long) v0,
		       (unsigned long long) v1,
		       (unsigned long long) v2);

	if (unlikely(!v0 && !v1 && !v2)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return IRQ_NONE;
	}

	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
		if (err)
			goto out;
	}
	if (likely(v0 & ~((u64)1 << LDN_MIF)))
		niu_schedule_napi(np, lp, v0, v1, v2);
	else
		niu_ldg_rearm(np, lp, 1);
out:
	spin_unlock_irqrestore(&np->lock, flags);

	return IRQ_HANDLED;
}
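
/* Interrupt demultiplexing in one place: LDSV0 carries the per-channel
 * RX/TX fast-path bits (handled via NAPI through niu_schedule_napi()),
 * while the MIF bit in v0 and anything in LDSV1/LDSV2 indicate error
 * or management events that take niu_slowpath_interrupt() under the
 * device lock.
 */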
static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
{
	if (rp->mbox) {
		np->ops->free_coherent(np->device,
				       sizeof(struct rxdma_mailbox),
				       rp->mbox, rp->mbox_dma);
		rp->mbox = NULL;
	}
	if (rp->rcr) {
		np->ops->free_coherent(np->device,
				       MAX_RCR_RING_SIZE * sizeof(__le64),
				       rp->rcr, rp->rcr_dma);
		rp->rcr = NULL;
		rp->rcr_table_size = 0;
		rp->rcr_index = 0;
	}
	if (rp->rbr) {
		niu_rbr_free(np, rp);

		np->ops->free_coherent(np->device,
				       MAX_RBR_RING_SIZE * sizeof(__le32),
				       rp->rbr, rp->rbr_dma);
		rp->rbr = NULL;
		rp->rbr_table_size = 0;
		rp->rbr_index = 0;
	}
	kfree(rp->rxhash);
	rp->rxhash = NULL;
}

static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
{
	if (rp->mbox) {
		np->ops->free_coherent(np->device,
				       sizeof(struct txdma_mailbox),
				       rp->mbox, rp->mbox_dma);
		rp->mbox = NULL;
	}
	if (rp->descr) {
		int i;

		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
			if (rp->tx_buffs[i].skb)
				(void) release_tx_packet(np, rp, i);
		}

		np->ops->free_coherent(np->device,
				       MAX_TX_RING_SIZE * sizeof(__le64),
				       rp->descr, rp->descr_dma);
		rp->descr = NULL;
		rp->pending = 0;
		rp->prod = 0;
		rp->cons = 0;
		rp->wrap_bit = 0;
	}
}

static void niu_free_channels(struct niu *np)
{
	int i;

	if (np->rx_rings) {
		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			niu_free_rx_ring_info(np, rp);
		}
		kfree(np->rx_rings);
		np->rx_rings = NULL;
		np->num_rx_rings = 0;
	}

	if (np->tx_rings) {
		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			niu_free_tx_ring_info(np, rp);
		}
		kfree(np->tx_rings);
		np->tx_rings = NULL;
		np->num_tx_rings = 0;
	}
}
static int niu_alloc_rx_ring_info(struct niu *np,
				  struct rx_ring_info *rp)
{
	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);

	rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
			     GFP_KERNEL);
	if (!rp->rxhash)
		return -ENOMEM;

	rp->mbox = np->ops->alloc_coherent(np->device,
					   sizeof(struct rxdma_mailbox),
					   &rp->mbox_dma, GFP_KERNEL);
	if (!rp->mbox)
		return -ENOMEM;
	if ((unsigned long)rp->mbox & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"RXDMA mailbox %p\n", np->dev->name, rp->mbox);
		return -EINVAL;
	}

	rp->rcr = np->ops->alloc_coherent(np->device,
					  MAX_RCR_RING_SIZE * sizeof(__le64),
					  &rp->rcr_dma, GFP_KERNEL);
	if (!rp->rcr)
		return -ENOMEM;
	if ((unsigned long)rp->rcr & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"RXDMA RCR table %p\n", np->dev->name, rp->rcr);
		return -EINVAL;
	}
	rp->rcr_table_size = MAX_RCR_RING_SIZE;
	rp->rcr_index = 0;

	rp->rbr = np->ops->alloc_coherent(np->device,
					  MAX_RBR_RING_SIZE * sizeof(__le32),
					  &rp->rbr_dma, GFP_KERNEL);
	if (!rp->rbr)
		return -ENOMEM;
	if ((unsigned long)rp->rbr & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"RXDMA RBR table %p\n", np->dev->name, rp->rbr);
		return -EINVAL;
	}
	rp->rbr_table_size = MAX_RBR_RING_SIZE;
	rp->rbr_index = 0;
	rp->rbr_pending = 0;

	return 0;
}

static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
{
	int mtu = np->dev->mtu;

	/* These values are recommended by the HW designers for fair
	 * utilization of DRR amongst the rings.
	 */
	rp->max_burst = mtu + 32;
	if (rp->max_burst > 4096)
		rp->max_burst = 4096;
}
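
/* Example: a standard 1500-byte MTU yields a max_burst of 1532, while
 * a 9000-byte jumbo MTU is clamped to the 4096 ceiling.
 */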
static int niu_alloc_tx_ring_info(struct niu *np,
				  struct tx_ring_info *rp)
{
	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);

	rp->mbox = np->ops->alloc_coherent(np->device,
					   sizeof(struct txdma_mailbox),
					   &rp->mbox_dma, GFP_KERNEL);
	if (!rp->mbox)
		return -ENOMEM;
	if ((unsigned long)rp->mbox & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"TXDMA mailbox %p\n", np->dev->name, rp->mbox);
		return -EINVAL;
	}

	rp->descr = np->ops->alloc_coherent(np->device,
					    MAX_TX_RING_SIZE * sizeof(__le64),
					    &rp->descr_dma, GFP_KERNEL);
	if (!rp->descr)
		return -ENOMEM;
	if ((unsigned long)rp->descr & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"TXDMA descr table %p\n", np->dev->name, rp->descr);
		return -EINVAL;
	}

	rp->pending = MAX_TX_RING_SIZE;
	rp->prod = 0;
	rp->cons = 0;
	rp->wrap_bit = 0;

	/* XXX make these configurable... XXX */
	rp->mark_freq = rp->pending / 4;

	niu_set_max_burst(np, rp);

	return 0;
}

static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
{
	u16 bss;

	bss = min(PAGE_SHIFT, 15);

	rp->rbr_block_size = 1 << bss;
	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);

	rp->rbr_sizes[0] = 256;
	rp->rbr_sizes[1] = 1024;
	if (np->dev->mtu > ETH_DATA_LEN) {
		switch (PAGE_SIZE) {
		case 4 * 1024:
			rp->rbr_sizes[2] = 4096;
			break;

		default:
			rp->rbr_sizes[2] = 8192;
			break;
		}
	} else
		rp->rbr_sizes[2] = 2048;

	rp->rbr_sizes[3] = rp->rbr_block_size;
}
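
/* Example with 4KB pages: bss = min(12, 15) = 12, so rbr_block_size is
 * 4096 and each page holds exactly one RBR block.  With 64KB pages,
 * bss is capped at 15, giving 32KB blocks and two blocks per page.
 */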
static int niu_alloc_channels(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int first_rx_channel, first_tx_channel;
	int i, port, err;

	port = np->port;
	first_rx_channel = first_tx_channel = 0;
	for (i = 0; i < port; i++) {
		first_rx_channel += parent->rxchan_per_port[i];
		first_tx_channel += parent->txchan_per_port[i];
	}

	np->num_rx_rings = parent->rxchan_per_port[port];
	np->num_tx_rings = parent->txchan_per_port[port];

	np->dev->real_num_tx_queues = np->num_tx_rings;

	np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
			       GFP_KERNEL);
	err = -ENOMEM;
	if (!np->rx_rings)
		goto out_err;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		rp->np = np;
		rp->rx_channel = first_rx_channel + i;

		err = niu_alloc_rx_ring_info(np, rp);
		if (err)
			goto out_err;

		niu_size_rbr(np, rp);

		/* XXX better defaults, configurable, etc... XXX */
		rp->nonsyn_window = 64;
		rp->nonsyn_threshold = rp->rcr_table_size - 64;
		rp->syn_window = 64;
		rp->syn_threshold = rp->rcr_table_size - 64;
		rp->rcr_pkt_threshold = 16;
		rp->rcr_timeout = 8;
		rp->rbr_kick_thresh = RBR_REFILL_MIN;
		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;

		err = niu_rbr_fill(np, rp, GFP_KERNEL);
		if (err)
			goto out_err;
	}

	np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
			       GFP_KERNEL);
	err = -ENOMEM;
	if (!np->tx_rings)
		goto out_err;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		rp->np = np;
		rp->tx_channel = first_tx_channel + i;

		err = niu_alloc_tx_ring_info(np, rp);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	niu_free_channels(np);
	return err;
}
static int niu_tx_cs_sng_poll(struct niu *np, int channel)
{
	int limit = 1000;

	while (--limit > 0) {
		u64 val = nr64(TX_CS(channel));
		if (val & TX_CS_SNG_STATE)
			return 0;
	}
	return -ENODEV;
}

static int niu_tx_channel_stop(struct niu *np, int channel)
{
	u64 val = nr64(TX_CS(channel));

	val |= TX_CS_STOP_N_GO;
	nw64(TX_CS(channel), val);

	return niu_tx_cs_sng_poll(np, channel);
}

static int niu_tx_cs_reset_poll(struct niu *np, int channel)
{
	int limit = 1000;

	while (--limit > 0) {
		u64 val = nr64(TX_CS(channel));
		if (!(val & TX_CS_RST))
			return 0;
	}
	return -ENODEV;
}

static int niu_tx_channel_reset(struct niu *np, int channel)
{
	u64 val = nr64(TX_CS(channel));
	int err;

	val |= TX_CS_RST;
	nw64(TX_CS(channel), val);

	err = niu_tx_cs_reset_poll(np, channel);
	if (!err)
		nw64(TX_RING_KICK(channel), 0);

	return err;
}

static int niu_tx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(TX_LOG_MASK1(channel), 0);
	nw64(TX_LOG_VAL1(channel), 0);
	nw64(TX_LOG_MASK2(channel), 0);
	nw64(TX_LOG_VAL2(channel), 0);
	nw64(TX_LOG_PAGE_RELO1(channel), 0);
	nw64(TX_LOG_PAGE_RELO2(channel), 0);
	nw64(TX_LOG_PAGE_HDL(channel), 0);

	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
	nw64(TX_LOG_PAGE_VLD(channel), val);

	/* XXX TXDMA 32bit mode? XXX */

	return 0;
}
static void niu_txc_enable_port(struct niu *np, int on)
{
	unsigned long flags;
	u64 val, mask;

	niu_lock_parent(np, flags);
	val = nr64(TXC_CONTROL);
	mask = (u64)1 << np->port;
	if (on) {
		val |= TXC_CONTROL_ENABLE | mask;
	} else {
		val &= ~mask;
		if ((val & ~TXC_CONTROL_ENABLE) == 0)
			val &= ~TXC_CONTROL_ENABLE;
	}
	nw64(TXC_CONTROL, val);
	niu_unlock_parent(np, flags);
}
static void niu_txc_set_imask(struct niu *np, u64 imask)
{
	unsigned long flags;
	u64 val;

	niu_lock_parent(np, flags);
	val = nr64(TXC_INT_MASK);
	val &= ~TXC_INT_MASK_VAL(np->port);
	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
	nw64(TXC_INT_MASK, val);
	niu_unlock_parent(np, flags);
}
static void niu_txc_port_dma_enable(struct niu *np, int on)
{
	u64 val = 0;

	if (on) {
		int i;

		for (i = 0; i < np->num_tx_rings; i++)
			val |= (1 << np->tx_rings[i].tx_channel);
	}
	nw64(TXC_PORT_DMA(np->port), val);
}
static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	int err, channel = rp->tx_channel;
	u64 val, ring_len;

	err = niu_tx_channel_stop(np, channel);
	if (err)
		return err;

	err = niu_tx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_tx_channel_lpage_init(np, channel);
	if (err)
		return err;

	nw64(TXC_DMA_MAX(channel), rp->max_burst);
	nw64(TX_ENT_MSK(channel), 0);

	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
			      TX_RNG_CFIG_STADDR)) {
		dev_err(np->device, PFX "%s: TX ring channel %d "
			"DMA addr (%llx) is not aligned.\n",
			np->dev->name, channel,
			(unsigned long long) rp->descr_dma);
		return -EINVAL;
	}

	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks.  rp->pending is the number of TX descriptors in
	 * our ring, 8 bytes each, thus we divide by 8 bytes more
	 * to get the proper value the chip wants.
	 */
	ring_len = (rp->pending / 8);

	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
	       rp->descr_dma);
	nw64(TX_RNG_CFIG(channel), val);

	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
		dev_err(np->device, PFX "%s: TX ring channel %d "
			"MBOX addr (%llx) has illegal bits.\n",
			np->dev->name, channel,
			(unsigned long long) rp->mbox_dma);
		return -EINVAL;
	}
	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);

	nw64(TX_CS(channel), 0);

	rp->last_pkt_cnt = 0;

	return 0;
}
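
/* Worked example of the ring_len math above: with descriptors of 8
 * bytes each, pending/8 is the ring size in the 64-byte units
 * TX_RNG_CFIG expects; e.g. a 256-entry ring is 2048 bytes, encoded
 * as a length of 32.
 */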
static void niu_init_rdc_groups(struct niu *np)
{
	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
	int i, first_table_num = tp->first_table_num;

	for (i = 0; i < tp->num_tables; i++) {
		struct rdc_table *tbl = &tp->tables[i];
		int this_table = first_table_num + i;
		int slot;

		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
			nw64(RDC_TBL(this_table, slot),
			     tbl->rxdma_channel[slot]);
	}

	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
}

static void niu_init_drr_weight(struct niu *np)
{
	int type = phy_decode(np->parent->port_phy, np->port);
	u64 val;

	switch (type) {
	case PORT_TYPE_10G:
		val = PT_DRR_WEIGHT_DEFAULT_10G;
		break;

	case PORT_TYPE_1G:
	default:
		val = PT_DRR_WEIGHT_DEFAULT_1G;
		break;
	}
	nw64(PT_DRR_WT(np->port), val);
}

static int niu_init_hostinfo(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int i, err, num_alt = niu_num_alt_addr(np);
	int first_rdc_table = tp->first_table_num;

	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	if (err)
		return err;

	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	if (err)
		return err;

	for (i = 0; i < num_alt; i++) {
		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
		if (err)
			return err;
	}

	return 0;
}
static int niu_rx_channel_reset(struct niu *np, int channel)
{
	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
				      RXDMA_CFIG1_RST, 1000, 10,
				      "RXDMA_CFIG1");
}

static int niu_rx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(RX_LOG_MASK1(channel), 0);
	nw64(RX_LOG_VAL1(channel), 0);
	nw64(RX_LOG_MASK2(channel), 0);
	nw64(RX_LOG_VAL2(channel), 0);
	nw64(RX_LOG_PAGE_RELO1(channel), 0);
	nw64(RX_LOG_PAGE_RELO2(channel), 0);
	nw64(RX_LOG_PAGE_HDL(channel), 0);

	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
	nw64(RX_LOG_PAGE_VLD(channel), val);

	return 0;
}

static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
{
	u64 val;

	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
	nw64(RDC_RED_PARA(rp->rx_channel), val);
}
static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
{
	u64 val = 0;

	*ret = 0;
	switch (rp->rbr_block_size) {
	case 4 * 1024:
		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 16 * 1024:
		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 32 * 1024:
		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD2;
	switch (rp->rbr_sizes[2]) {
	case 2 * 1024:
		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 4 * 1024:
		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 16 * 1024:
		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD1;
	switch (rp->rbr_sizes[1]) {
	case 1 * 1024:
		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 2 * 1024:
		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 4 * 1024:
		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD0;
	switch (rp->rbr_sizes[0]) {
	case 256:
		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 512:
		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 1 * 1024:
		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 2 * 1024:
		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	default:
		return -EINVAL;
	}

	*ret = val;
	return 0;
}

static int niu_enable_rx_channel(struct niu *np, int channel, int on)
{
	u64 val = nr64(RXDMA_CFIG1(channel));
	int limit;

	if (on)
		val |= RXDMA_CFIG1_EN;
	else
		val &= ~RXDMA_CFIG1_EN;
	nw64(RXDMA_CFIG1(channel), val);

	limit = 1000;
	while (--limit > 0) {
		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
			break;
		udelay(10);
	}
	if (limit <= 0)
		return -ENODEV;
	return 0;
}
static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int err, channel = rp->rx_channel;
	u64 val;

	err = niu_rx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_rx_channel_lpage_init(np, channel);
	if (err)
		return err;

	niu_rx_channel_wred_init(np, rp);

	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
	nw64(RX_DMA_CTL_STAT(channel),
	     (RX_DMA_CTL_STAT_MEX |
	      RX_DMA_CTL_STAT_RCRTHRES |
	      RX_DMA_CTL_STAT_RCRTO |
	      RX_DMA_CTL_STAT_RBR_EMPTY));
	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
	nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
	nw64(RBR_CFIG_A(channel),
	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
	err = niu_compute_rbr_cfig_b(rp, &val);
	if (err)
		return err;
	nw64(RBR_CFIG_B(channel), val);
	nw64(RCRCFIG_A(channel),
	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
	nw64(RCRCFIG_B(channel),
	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
	     RCRCFIG_B_ENTOUT |
	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));

	err = niu_enable_rx_channel(np, channel, 1);
	if (err)
		return err;

	nw64(RBR_KICK(channel), rp->rbr_index);

	val = nr64(RX_DMA_CTL_STAT(channel));
	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
	nw64(RX_DMA_CTL_STAT(channel), val);

	return 0;
}

static int niu_init_rx_channels(struct niu *np)
{
	unsigned long flags;
	u64 seed = jiffies_64;
	int err, i;

	niu_lock_parent(np, flags);
	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
	niu_unlock_parent(np, flags);

	/* XXX RXDMA 32bit mode? XXX */

	niu_init_rdc_groups(np);
	niu_init_drr_weight(np);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		err = niu_init_one_rx_channel(np, rp);
		if (err)
			return err;
	}

	return 0;
}
static int niu_set_ip_frag_rule(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	struct niu_tcam_entry *tp;
	int index, err;

	/* XXX fix this allocation scheme XXX */
	index = cp->tcam_index;
	tp = &parent->tcam[index];

	/* Note that the noport bit is the same in both ipv4 and
	 * ipv6 format TCAM entries.
	 */
	memset(tp, 0, sizeof(*tp));
	tp->key[1] = TCAM_V4KEY1_NOPORT;
	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
	err = tcam_write(np, index, tp->key, tp->key_mask);
	if (err)
		return err;
	err = tcam_assoc_write(np, index, tp->assoc_data);
	if (err)
		return err;

	return 0;
}

static int niu_init_classifier_hw(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	int i, err;

	nw64(H1POLY, cp->h1_init);
	nw64(H2POLY, cp->h2_init);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];

		vlan_tbl_write(np, i, np->port,
			       vp->vlan_pref, vp->rdc_num);
	}

	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];

		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
						ap->rdc_num, ap->mac_pref);
		if (err)
			return err;
	}

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
		if (err)
			return err;
		err = niu_set_flow_key(np, i, parent->flow_key[index]);
		if (err)
			return err;
	}

	err = niu_set_ip_frag_rule(np);
	if (err)
		return err;

	tcam_enable(np, 1);

	return 0;
}
static int niu_zcp_write(struct niu *np, int index, u64 *data)
{
	nw64(ZCP_RAM_DATA0, data[0]);
	nw64(ZCP_RAM_DATA1, data[1]);
	nw64(ZCP_RAM_DATA2, data[2]);
	nw64(ZCP_RAM_DATA3, data[3]);
	nw64(ZCP_RAM_DATA4, data[4]);
	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_WRITE |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				   1000, 100);
}

static int niu_zcp_read(struct niu *np, int index, u64 *data)
{
	int err;

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
			"ZCP_RAM_ACC[%llx]\n", np->dev->name,
			(unsigned long long) nr64(ZCP_RAM_ACC));
		return err;
	}

	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_READ |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
			"ZCP_RAM_ACC[%llx]\n", np->dev->name,
			(unsigned long long) nr64(ZCP_RAM_ACC));
		return err;
	}

	data[0] = nr64(ZCP_RAM_DATA0);
	data[1] = nr64(ZCP_RAM_DATA1);
	data[2] = nr64(ZCP_RAM_DATA2);
	data[3] = nr64(ZCP_RAM_DATA3);
	data[4] = nr64(ZCP_RAM_DATA4);

	return 0;
}

static void niu_zcp_cfifo_reset(struct niu *np)
{
	u64 val = nr64(RESET_CFIFO);

	val |= RESET_CFIFO_RST(np->port);
	nw64(RESET_CFIFO, val);
	udelay(10);

	val &= ~RESET_CFIFO_RST(np->port);
	nw64(RESET_CFIFO, val);
}
static int niu_init_zcp(struct niu *np)
{
	u64 data[5], rbuf[5];
	int i, max, err;

	if (np->parent->plat_type != PLAT_TYPE_NIU) {
		if (np->port == 0 || np->port == 1)
			max = ATLAS_P0_P1_CFIFO_ENTRIES;
		else
			max = ATLAS_P2_P3_CFIFO_ENTRIES;
	} else
		max = NIU_CFIFO_ENTRIES;

	data[0] = 0;
	data[1] = 0;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;

	for (i = 0; i < max; i++) {
		err = niu_zcp_write(np, i, data);
		if (err)
			return err;
		err = niu_zcp_read(np, i, rbuf);
		if (err)
			return err;
	}

	niu_zcp_cfifo_reset(np);
	nw64(CFIFO_ECC(np->port), 0);
	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
	(void) nr64(ZCP_INT_STAT);
	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);

	return 0;
}
static void niu_ipp_write(struct niu *np, int index, u64 *data)
{
	u64 val = nr64_ipp(IPP_CFIG);

	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
	nw64_ipp(IPP_DFIFO_WR_PTR, index);
	nw64_ipp(IPP_DFIFO_WR0, data[0]);
	nw64_ipp(IPP_DFIFO_WR1, data[1]);
	nw64_ipp(IPP_DFIFO_WR2, data[2]);
	nw64_ipp(IPP_DFIFO_WR3, data[3]);
	nw64_ipp(IPP_DFIFO_WR4, data[4]);
	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
}

static void niu_ipp_read(struct niu *np, int index, u64 *data)
{
	nw64_ipp(IPP_DFIFO_RD_PTR, index);
	data[0] = nr64_ipp(IPP_DFIFO_RD0);
	data[1] = nr64_ipp(IPP_DFIFO_RD1);
	data[2] = nr64_ipp(IPP_DFIFO_RD2);
	data[3] = nr64_ipp(IPP_DFIFO_RD3);
	data[4] = nr64_ipp(IPP_DFIFO_RD4);
}

static int niu_ipp_reset(struct niu *np)
{
	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
					  1000, 100, "IPP_CFIG");
}

static int niu_init_ipp(struct niu *np)
{
	u64 data[5], rbuf[5], val;
	int i, max, err;

	if (np->parent->plat_type != PLAT_TYPE_NIU) {
		if (np->port == 0 || np->port == 1)
			max = ATLAS_P0_P1_DFIFO_ENTRIES;
		else
			max = ATLAS_P2_P3_DFIFO_ENTRIES;
	} else
		max = NIU_DFIFO_ENTRIES;

	data[0] = 0;
	data[1] = 0;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;

	for (i = 0; i < max; i++) {
		niu_ipp_write(np, i, data);
		niu_ipp_read(np, i, rbuf);
	}

	(void) nr64_ipp(IPP_INT_STAT);
	(void) nr64_ipp(IPP_INT_STAT);

	err = niu_ipp_reset(np);
	if (err)
		return err;

	(void) nr64_ipp(IPP_PKT_DIS);
	(void) nr64_ipp(IPP_BAD_CS_CNT);
	(void) nr64_ipp(IPP_ECC);

	(void) nr64_ipp(IPP_INT_STAT);

	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);

	val = nr64_ipp(IPP_CFIG);
	val &= ~IPP_CFIG_IP_MAX_PKT;
	val |= (IPP_CFIG_IPP_ENABLE |
		IPP_CFIG_DFIFO_ECC_EN |
		IPP_CFIG_DROP_BAD_CRC |
		IPP_CFIG_CKSUM_EN |
		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
	nw64_ipp(IPP_CFIG, val);

	return 0;
}
static void niu_handle_led(struct niu *np, int status)
{
	u64 val;
	val = nr64_mac(XMAC_CONFIG);

	if ((np->flags & NIU_FLAGS_10G) != 0 &&
	    (np->flags & NIU_FLAGS_FIBER) != 0) {
		if (status) {
			val |= XMAC_CONFIG_LED_POLARITY;
			val &= ~XMAC_CONFIG_FORCE_LED_ON;
		} else {
			val |= XMAC_CONFIG_FORCE_LED_ON;
			val &= ~XMAC_CONFIG_LED_POLARITY;
		}
	}

	nw64_mac(XMAC_CONFIG, val);
}

static void niu_init_xif_xmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
		val = nr64(MIF_CONFIG);
		val |= MIF_CONFIG_ATCA_GE;
		nw64(MIF_CONFIG, val);
	}

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;

	val |= XMAC_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
		val |= XMAC_CONFIG_LOOPBACK;
	} else {
		val &= ~XMAC_CONFIG_LOOPBACK;
	}

	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_CONFIG_LFS_DISABLE;
	} else {
		val |= XMAC_CONFIG_LFS_DISABLE;
		if (!(np->flags & NIU_FLAGS_FIBER) &&
		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
			val |= XMAC_CONFIG_1G_PCS_BYPASS;
		else
			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
	}

	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;

	if (lp->active_speed == SPEED_100)
		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
	else
		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;

	nw64_mac(XMAC_CONFIG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_MODE_MASK;
	if (np->flags & NIU_FLAGS_10G) {
		val |= XMAC_CONFIG_MODE_XGMII;
	} else {
		if (lp->active_speed == SPEED_100)
			val |= XMAC_CONFIG_MODE_MII;
		else
			val |= XMAC_CONFIG_MODE_GMII;
	}

	nw64_mac(XMAC_CONFIG, val);
}

static void niu_init_xif_bmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC)
		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
	else
		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;

	if (lp->active_speed == SPEED_1000)
		val |= BMAC_XIF_CONFIG_GMII_MODE;
	else
		val &= ~BMAC_XIF_CONFIG_GMII_MODE;

	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
		 BMAC_XIF_CONFIG_LED_POLARITY);

	if (!(np->flags & NIU_FLAGS_10G) &&
	    !(np->flags & NIU_FLAGS_FIBER) &&
	    lp->active_speed == SPEED_100)
		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
	else
		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;

	nw64_mac(BMAC_XIF_CONFIG, val);
}

static void niu_init_xif(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_xif_xmac(np);
	else
		niu_init_xif_bmac(np);
}
static void niu_pcs_mii_reset(struct niu *np)
{
	int limit = 1000;
	u64 val = nr64_pcs(PCS_MII_CTL);
	val |= PCS_MII_CTL_RST;
	nw64_pcs(PCS_MII_CTL, val);
	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
		udelay(100);
		val = nr64_pcs(PCS_MII_CTL);
	}
}

static void niu_xpcs_reset(struct niu *np)
{
	int limit = 1000;
	u64 val = nr64_xpcs(XPCS_CONTROL1);
	val |= XPCS_CONTROL1_RESET;
	nw64_xpcs(XPCS_CONTROL1, val);
	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
		udelay(100);
		val = nr64_xpcs(XPCS_CONTROL1);
	}
}

static int niu_init_pcs(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	switch (np->flags & (NIU_FLAGS_10G |
			     NIU_FLAGS_FIBER |
			     NIU_FLAGS_XCVR_SERDES)) {
	case NIU_FLAGS_FIBER:
		/* 1G fiber */
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		niu_pcs_mii_reset(np);
		break;

	case NIU_FLAGS_10G:
	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		/* 10G SERDES */
		if (!(np->flags & NIU_FLAGS_XMAC))
			return -EINVAL;

		/* 10G copper or fiber */
		val = nr64_mac(XMAC_CONFIG);
		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
		nw64_mac(XMAC_CONFIG, val);

		niu_xpcs_reset(np);

		val = nr64_xpcs(XPCS_CONTROL1);
		if (lp->loopback_mode == LOOPBACK_PHY)
			val |= XPCS_CONTROL1_LOOPBACK;
		else
			val &= ~XPCS_CONTROL1_LOOPBACK;
		nw64_xpcs(XPCS_CONTROL1, val);

		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
		break;


	case NIU_FLAGS_XCVR_SERDES:
		/* 1G SERDES */
		niu_pcs_mii_reset(np);
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		break;

	case 0:
		/* 1G copper */
	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		/* 1G RGMII FIBER */
		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
		niu_pcs_mii_reset(np);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int niu_reset_tx_xmac(struct niu *np)
{
	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
					  (XTXMAC_SW_RST_REG_RS |
					   XTXMAC_SW_RST_SOFT_RST),
					  1000, 100, "XTXMAC_SW_RST");
}

static int niu_reset_tx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
			"BTXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_tx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_tx_xmac(np);
	else
		return niu_reset_tx_bmac(np);
}

static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	val = nr64_mac(XMAC_MIN);
	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
		 XMAC_MIN_RX_MIN_PKT_SIZE);
	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
	nw64_mac(XMAC_MIN, val);

	nw64_mac(XMAC_MAX, max);

	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);

	val = nr64_mac(XMAC_IPG);
	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_IPG_IPG_XGMII;
		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
	} else {
		val &= ~XMAC_IPG_IPG_MII_GMII;
		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
	}
	nw64_mac(XMAC_IPG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
		 XMAC_CONFIG_STRETCH_MODE |
		 XMAC_CONFIG_VAR_MIN_IPG_EN |
		 XMAC_CONFIG_TX_ENABLE);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(TXMAC_FRM_CNT, 0);
	nw64_mac(TXMAC_BYTE_CNT, 0);
}

static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	nw64_mac(BMAC_MIN_FRAME, min);
	nw64_mac(BMAC_MAX_FRAME, max);

	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
	nw64_mac(BMAC_PREAMBLE_SIZE, 7);

	val = nr64_mac(BTXMAC_CONFIG);
	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
		 BTXMAC_CONFIG_ENABLE);
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_init_tx_mac(struct niu *np)
{
	u64 min, max;

	min = 64;
	if (np->dev->mtu > ETH_DATA_LEN)
		max = 9216;
	else
		max = 1522;

	/* The XMAC_MIN register only accepts values for TX min which
	 * have the low 3 bits cleared.
	 */
	BUILD_BUG_ON(min & 0x7);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_tx_xmac(np, min, max);
	else
		niu_init_tx_bmac(np, min, max);
}
static int niu_reset_rx_xmac(struct niu *np)
{
	int limit;

	nw64_mac(XRXMAC_SW_RST,
		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
						 XRXMAC_SW_RST_SOFT_RST)))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
			"XRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_rx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
			"BRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_rx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_rx_xmac(np);
	else
		return niu_reset_rx_bmac(np);
}

static void niu_init_rx_xmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(XMAC_ADD_FILT0, 0);
	nw64_mac(XMAC_ADD_FILT1, 0);
	nw64_mac(XMAC_ADD_FILT2, 0);
	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(XMAC_HASH_TBL(i), 0);
	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
		 XMAC_CONFIG_PROMISCUOUS |
		 XMAC_CONFIG_PROMISC_GROUP |
		 XMAC_CONFIG_ERR_CHK_DIS |
		 XMAC_CONFIG_RX_CRC_CHK_DIS |
		 XMAC_CONFIG_RESERVED_MULTICAST |
		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
		 XMAC_CONFIG_ADDR_FILTER_EN |
		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
		 XMAC_CONFIG_STRIP_CRC |
		 XMAC_CONFIG_PASS_FLOW_CTRL |
		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
	val |= (XMAC_CONFIG_HASH_FILTER_EN);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(RXMAC_BT_CNT, 0);
	nw64_mac(RXMAC_BC_FRM_CNT, 0);
	nw64_mac(RXMAC_MC_FRM_CNT, 0);
	nw64_mac(RXMAC_FRAG_CNT, 0);
	nw64_mac(RXMAC_HIST_CNT1, 0);
	nw64_mac(RXMAC_HIST_CNT2, 0);
	nw64_mac(RXMAC_HIST_CNT3, 0);
	nw64_mac(RXMAC_HIST_CNT4, 0);
	nw64_mac(RXMAC_HIST_CNT5, 0);
	nw64_mac(RXMAC_HIST_CNT6, 0);
	nw64_mac(RXMAC_HIST_CNT7, 0);
	nw64_mac(RXMAC_MPSZER_CNT, 0);
	nw64_mac(RXMAC_CRC_ER_CNT, 0);
	nw64_mac(RXMAC_CD_VIO_CNT, 0);
	nw64_mac(LINK_FAULT_CNT, 0);
}
static void niu_init_rx_bmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(BMAC_ADD_FILT0, 0);
	nw64_mac(BMAC_ADD_FILT1, 0);
	nw64_mac(BMAC_ADD_FILT2, 0);
	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(BMAC_HASH_TBL(i), 0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);

	val = nr64_mac(BRXMAC_CONFIG);
	val &= ~(BRXMAC_CONFIG_ENABLE |
		 BRXMAC_CONFIG_STRIP_PAD |
		 BRXMAC_CONFIG_STRIP_FCS |
		 BRXMAC_CONFIG_PROMISC |
		 BRXMAC_CONFIG_PROMISC_GRP |
		 BRXMAC_CONFIG_ADDR_FILT_EN |
		 BRXMAC_CONFIG_DISCARD_DIS);
	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
	nw64_mac(BRXMAC_CONFIG, val);

	val = nr64_mac(BMAC_ADDR_CMPEN);
	val |= BMAC_ADDR_CMPEN_EN0;
	nw64_mac(BMAC_ADDR_CMPEN, val);
}
static void niu_init_rx_mac(struct niu *np)
{
	niu_set_primary_mac(np, np->dev->dev_addr);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_rx_xmac(np);
	else
		niu_init_rx_bmac(np);
}
static void niu_enable_tx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	if (on)
		val |= XMAC_CONFIG_TX_ENABLE;
	else
		val &= ~XMAC_CONFIG_TX_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_tx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BTXMAC_CONFIG);

	if (on)
		val |= BTXMAC_CONFIG_ENABLE;
	else
		val &= ~BTXMAC_CONFIG_ENABLE;
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_enable_tx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_tx_xmac(np, on);
	else
		niu_enable_tx_bmac(np, on);
}

static void niu_enable_rx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
		 XMAC_CONFIG_PROMISCUOUS);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= XMAC_CONFIG_HASH_FILTER_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= XMAC_CONFIG_PROMISCUOUS;

	if (on)
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
	else
		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_rx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BRXMAC_CONFIG);

	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
		 BRXMAC_CONFIG_PROMISC);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= BRXMAC_CONFIG_HASH_FILT_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= BRXMAC_CONFIG_PROMISC;

	if (on)
		val |= BRXMAC_CONFIG_ENABLE;
	else
		val &= ~BRXMAC_CONFIG_ENABLE;
	nw64_mac(BRXMAC_CONFIG, val);
}

static void niu_enable_rx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_rx_xmac(np, on);
	else
		niu_enable_rx_bmac(np, on);
}
static int niu_init_mac(struct niu *np)
{
	int err;

	niu_init_xif(np);
	err = niu_init_pcs(np);
	if (err)
		return err;

	err = niu_reset_tx_mac(np);
	if (err)
		return err;
	niu_init_tx_mac(np);
	err = niu_reset_rx_mac(np);
	if (err)
		return err;
	niu_init_rx_mac(np);

	/* This looks hookey but the RX MAC reset we just did will
	 * undo some of the state we setup in niu_init_tx_mac() so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to its default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}
static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}

static void niu_stop_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_stop_one_tx_channel(np, rp);
	}
}

static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}

static void niu_reset_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_reset_one_tx_channel(np, rp);
	}
}

static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
}

static void niu_stop_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_stop_one_rx_channel(np, rp);
	}
}

static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}

static void niu_reset_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_reset_one_rx_channel(np, rp);
	}
}
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		dev_err(np->device, PFX "%s: IPP would not quiesce, "
			"rd_ptr[%llx] wr_ptr[%llx]\n",
			np->dev->name,
			(unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
			(unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}
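/* Bring the chip up in dependency order: TXC first, then the TX and RX
 * DMA channels, then the classifier, ZCP, and IPP blocks, and the MAC
 * last.  The error labels in niu_init_hw() below unwind in reverse.
 */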
static int niu_init_hw(struct niu *np)
{
	int i, err;

	niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
	niu_disable_ipp(np);

out_uninit_rx_channels:
	niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}
static void niu_stop_hw(struct niu *np)
{
	niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
	niu_enable_interrupts(np, 0);

	niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
	niu_enable_rx_mac(np, 0);

	niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
	niu_disable_ipp(np);

	niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);

	niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);

	niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
	niu_reset_tx_channels(np);

	niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
	niu_reset_rx_channels(np);
}
static int niu_request_irq(struct niu *np)
{
	int i, j, err;

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt,
				  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
				  np->dev->name, lp);
		if (err)
			goto out_free_irqs;
	}

	return 0;

out_free_irqs:
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
	return err;
}

static void niu_free_irq(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		free_irq(lp->irq, lp);
	}
}
static void niu_enable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_enable(&np->ldg[i].napi);
}

static void niu_disable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_disable(&np->ldg[i].napi);
}
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}

static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_irq(np);

	niu_free_channels(np);

	niu_handle_led(np, 0);

	return 0;
}
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}

static void niu_sync_bmac_stats(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;

	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);

	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
}

static void niu_sync_mac_stats(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_sync_xmac_stats(np);
	else
		niu_sync_bmac_stats(np);
}
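/* The per-ring counters below are maintained in software by the TX and
 * RX fast paths; here they are simply summed into the single
 * net_device_stats block that the stack sees.
 */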
static void niu_get_rx_stats(struct niu *np)
{
	unsigned long pkts, dropped, errors, bytes;
	int i;

	pkts = dropped = errors = bytes = 0;
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		pkts += rp->rx_packets;
		bytes += rp->rx_bytes;
		dropped += rp->rx_dropped;
		errors += rp->rx_errors;
	}
	np->net_stats.rx_packets = pkts;
	np->net_stats.rx_bytes = bytes;
	np->net_stats.rx_dropped = dropped;
	np->net_stats.rx_errors = errors;
}

static void niu_get_tx_stats(struct niu *np)
{
	unsigned long pkts, errors, bytes;
	int i;

	pkts = errors = bytes = 0;
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		pkts += rp->tx_packets;
		bytes += rp->tx_bytes;
		errors += rp->tx_errors;
	}
	np->net_stats.tx_packets = pkts;
	np->net_stats.tx_bytes = bytes;
	np->net_stats.tx_errors = errors;
}

static struct net_device_stats *niu_get_stats(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_get_rx_stats(np);
	niu_get_tx_stats(np);

	return &np->net_stats;
}
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash_bmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash(struct niu *np, u16 *hash)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_load_hash_xmac(np, hash);
	else
		niu_load_hash_bmac(np, hash);
}
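/* The multicast filter is a 256-bit hash table stored as 16 16-bit
 * words.  niu_set_rx_mode() below indexes it with the top byte of the
 * little-endian CRC of the address: for example, a CRC whose top byte
 * is 0xa7 sets bit (15 - 7) == 8 of hash[0xa].
 */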
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct dev_addr_list *addr;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
		np->flags |= NIU_FLAGS_MCAST;

	alt_cnt = dev->uc_count;
	if (alt_cnt > niu_num_alt_addr(np)) {
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		for (addr = dev->uc_list; addr; addr = addr->next) {
			err = niu_set_alt_mac(np, index,
					      addr->da_addr);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "adding alt mac %d\n",
				       dev->name, err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "enabling alt mac %d\n",
				       dev->name, err, index);

			index++;
		}
	} else {
		int alt_start;

		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "disabling alt mac %d\n",
				       dev->name, err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (dev->mc_count > 0) {
		for (addr = dev->mc_list; addr; addr = addr->next) {
			u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);

			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}
static void niu_netif_stop(struct niu *np)
{
	np->dev->trans_start = jiffies;	/* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}

static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}
static void niu_reset_buffers(struct niu *np)
{
	int i, j, k, err;

	if (np->rx_rings) {
		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
				struct page *page;

				page = rp->rxhash[j];
				while (page) {
					struct page *next =
						(struct page *) page->mapping;
					u64 base = page->index;

					base = base >> RBR_DESCR_ADDR_SHIFT;
					rp->rbr[k++] = cpu_to_le32(base);
					page = next;
				}
			}
			for (; k < MAX_RBR_RING_SIZE; k++) {
				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
				if (unlikely(err))
					break;
			}

			rp->rbr_index = rp->rbr_table_size - 1;
			rp->rcr_index = 0;
			rp->rbr_pending = 0;
			rp->rbr_refill_pending = 0;
		}
	}
	if (np->tx_rings) {
		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
				if (rp->tx_buffs[j].skb)
					(void) release_tx_packet(np, rp, j);
			}

			rp->pending = MAX_TX_RING_SIZE;
			rp->prod = 0;
			rp->cons = 0;
			rp->wrap_bit = 0;
		}
	}
}
static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);
	if (!netif_running(np->dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	del_timer_sync(&np->timer);

	niu_netif_stop(np);

	spin_lock_irqsave(&np->lock, flags);

	niu_stop_hw(np);

	spin_unlock_irqrestore(&np->lock, flags);

	niu_reset_buffers(np);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);
}
static void niu_tx_timeout(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
		dev->name);

	schedule_work(&np->reset_task);
}
static void niu_set_txd(struct tx_ring_info *rp, int index,
			u64 mapping, u64 len, u64 mark,
			u64 n_frags)
{
	__le64 *desc = &rp->descr[index];

	*desc = cpu_to_le64(mark |
			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
			    (len << TX_DESC_TR_LEN_SHIFT) |
			    (mapping & TX_DESC_SAD));
}
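/* Every packet is prefixed with an internal header (struct tx_pkt_hdr)
 * whose flags word is built by niu_compute_tx_flags() below; the
 * hardware parses it for checksum offload and the L3/L4 offsets.  All
 * offsets in that word are in 2-byte units, hence the divisions by
 * two.
 */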
static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
				u64 pad_bytes, u64 len)
{
	u16 eth_proto, eth_proto_inner;
	u64 csum_bits, l3off, ihl, ret;
	u8 ip_proto;
	int ipv6;

	eth_proto = be16_to_cpu(ehdr->h_proto);
	eth_proto_inner = eth_proto;
	if (eth_proto == ETH_P_8021Q) {
		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
		__be16 val = vp->h_vlan_encapsulated_proto;

		eth_proto_inner = be16_to_cpu(val);
	}

	ipv6 = ihl = 0;
	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		ihl = ip_hdr(skb)->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		ihl = (40 >> 2);
		ipv6 = 1;
		break;
	default:
		ip_proto = 0;
		break;
	}

	csum_bits = TXHDR_CSUM_NONE;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u64 start, stuff;

		csum_bits = (ip_proto == IPPROTO_TCP ?
			     TXHDR_CSUM_TCP :
			     (ip_proto == IPPROTO_UDP ?
			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));

		start = skb_transport_offset(skb) -
			(pad_bytes + sizeof(struct tx_pkt_hdr));
		stuff = start + skb->csum_offset;

		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
	}

	l3off = skb_network_offset(skb) -
		(pad_bytes + sizeof(struct tx_pkt_hdr));

	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
	       (len << TXHDR_LEN_SHIFT) |
	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
	       (ihl << TXHDR_IHL_SHIFT) |
	       ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
	       (ipv6 ? TXHDR_IP_VER : 0) |
	       csum_bits);

	return ret;
}
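/* niu_start_xmit() below pushes the internal header in front of the
 * frame, maps the linear area and each page fragment separately, and
 * splits any mapping larger than MAX_TX_DESC_LEN across several
 * descriptors; only the first descriptor of a packet carries the
 * SOP/MARK bits and the descriptor count.
 */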
static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	unsigned long align, headroom;
	struct netdev_queue *txq;
	struct tx_ring_info *rp;
	struct tx_pkt_hdr *tp;
	unsigned int len, nfg;
	struct ethhdr *ehdr;
	int prod, i, tlen;
	u64 mapping, mrk;

	i = skb_get_queue_mapping(skb);
	rp = &np->tx_rings[i];
	txq = netdev_get_tx_queue(dev, i);

	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_tx_stop_queue(txq);
		dev_err(np->device, PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", dev->name);
		rp->tx_errors++;
		return NETDEV_TX_BUSY;
	}

	if (skb->len < ETH_ZLEN) {
		unsigned int pad_bytes = ETH_ZLEN - skb->len;

		if (skb_pad(skb, pad_bytes))
			goto out;
		skb_put(skb, pad_bytes);
	}

	len = sizeof(struct tx_pkt_hdr) + 15;
	if (skb_headroom(skb) < len) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, len);
		if (!skb_new) {
			rp->tx_errors++;
			goto out_drop;
		}
		kfree_skb(skb);
		skb = skb_new;
	} else
		skb_orphan(skb);

	align = ((unsigned long) skb->data & (16 - 1));
	headroom = align + sizeof(struct tx_pkt_hdr);

	ehdr = (struct ethhdr *) skb->data;
	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);

	len = skb->len - sizeof(struct tx_pkt_hdr);
	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
	tp->resv = 0;

	len = skb_headlen(skb);
	mapping = np->ops->map_single(np->device, skb->data,
				      len, DMA_TO_DEVICE);

	prod = rp->prod;

	rp->tx_buffs[prod].skb = skb;
	rp->tx_buffs[prod].mapping = mapping;

	mrk = TX_DESC_SOP;
	if (++rp->mark_counter == rp->mark_freq) {
		rp->mark_counter = 0;
		mrk |= TX_DESC_MARK;
		rp->mark_pending++;
	}

	tlen = len;
	nfg = skb_shinfo(skb)->nr_frags;
	while (tlen > 0) {
		tlen -= MAX_TX_DESC_LEN;
		nfg++;
	}

	while (len > 0) {
		unsigned int this_len = len;

		if (this_len > MAX_TX_DESC_LEN)
			this_len = MAX_TX_DESC_LEN;

		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
		mrk = nfg = 0;

		prod = NEXT_TX(rp, prod);
		mapping += this_len;
		len -= this_len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = frag->size;
		mapping = np->ops->map_page(np->device, frag->page,
					    frag->page_offset, len,
					    DMA_TO_DEVICE);

		rp->tx_buffs[prod].skb = NULL;
		rp->tx_buffs[prod].mapping = mapping;

		niu_set_txd(rp, prod, mapping, len, 0, 0);

		prod = NEXT_TX(rp, prod);
	}

	if (prod < rp->prod)
		rp->wrap_bit ^= TX_RING_KICK_WRAP;
	rp->prod = prod;

	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));

	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
			netif_tx_wake_queue(txq);
	}

	dev->trans_start = jiffies;

out:
	return NETDEV_TX_OK;

out_drop:
	rp->tx_errors++;
	kfree_skb(skb);
	goto out;
}
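/* Changing the MTU across the jumbo boundary (ETH_DATA_LEN) changes
 * the RX buffering requirements, so a running interface is fully shut
 * down and its channels re-allocated rather than patched in place.
 */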
static int niu_change_mtu(struct net_device *dev, int new_mtu)
{
	struct niu *np = netdev_priv(dev);
	int err, orig_jumbo, new_jumbo;

	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
		return -EINVAL;

	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
	new_jumbo = (new_mtu > ETH_DATA_LEN);

	dev->mtu = new_mtu;

	if (!netif_running(dev) ||
	    (orig_jumbo == new_jumbo))
		return 0;

	niu_full_shutdown(np, dev);

	niu_free_channels(np);

	niu_enable_napi(np);

	err = niu_alloc_channels(np);
	if (err)
		return err;

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (!err) {
		netif_tx_start_all_queues(dev);
		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
			netif_carrier_on(dev);

		add_timer(&np->timer);
	}

	return err;
}
static void niu_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct niu *np = netdev_priv(dev);
	struct niu_vpd *vpd = &np->vpd;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	sprintf(info->fw_version, "%d.%d",
		vpd->fcode_major, vpd->fcode_minor);
	if (np->parent->plat_type != PLAT_TYPE_NIU)
		strcpy(info->bus_info, pci_name(np->pdev));
}
static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp;

	lp = &np->link_config;

	memset(cmd, 0, sizeof(*cmd));
	cmd->phy_address = np->phy_addr;
	cmd->supported = lp->supported;
	cmd->advertising = lp->advertising;
	cmd->autoneg = lp->autoneg;
	cmd->speed = lp->active_speed;
	cmd->duplex = lp->active_duplex;

	return 0;
}

static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	return -EINVAL;
}
static u32 niu_get_msglevel(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	return np->msg_enable;
}

static void niu_set_msglevel(struct net_device *dev, u32 value)
{
	struct niu *np = netdev_priv(dev);
	np->msg_enable = value;
}

static int niu_get_eeprom_len(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	return np->eeprom_len;
}
static int niu_get_eeprom(struct net_device *dev,
			  struct ethtool_eeprom *eeprom, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	u32 offset, len, val;

	offset = eeprom->offset;
	len = eeprom->len;

	if (offset + len < offset)
		return -EINVAL;
	if (offset >= np->eeprom_len)
		return -EINVAL;
	if (offset + len > np->eeprom_len)
		len = eeprom->len = np->eeprom_len - offset;

	if (offset & 3) {
		u32 b_offset, b_count;

		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len)
			b_count = len;

		val = nr64(ESPC_NCR((offset - b_offset) / 4));
		memcpy(data, ((char *)&val) + b_offset, b_count);
		data += b_count;
		len -= b_count;
		offset += b_count;
	}
	while (len >= 4) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, 4);
		data += 4;
		len -= 4;
		offset += 4;
	}
	if (len) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, len);
	}
	return 0;
}
static int niu_ethflow_to_class(int flow_type, u64 *class)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*class = CLASS_CODE_TCP_IPV4;
		break;
	case UDP_V4_FLOW:
		*class = CLASS_CODE_UDP_IPV4;
		break;
	case AH_ESP_V4_FLOW:
		*class = CLASS_CODE_AH_ESP_IPV4;
		break;
	case SCTP_V4_FLOW:
		*class = CLASS_CODE_SCTP_IPV4;
		break;
	case TCP_V6_FLOW:
		*class = CLASS_CODE_TCP_IPV6;
		break;
	case UDP_V6_FLOW:
		*class = CLASS_CODE_UDP_IPV6;
		break;
	case AH_ESP_V6_FLOW:
		*class = CLASS_CODE_AH_ESP_IPV6;
		break;
	case SCTP_V6_FLOW:
		*class = CLASS_CODE_SCTP_IPV6;
		break;
	default:
		return 0;
	}

	return 1;
}
static u64 niu_flowkey_to_ethflow(u64 flow_key)
{
	u64 ethflow = 0;

	if (flow_key & FLOW_KEY_PORT)
		ethflow |= RXH_DEV_PORT;
	if (flow_key & FLOW_KEY_L2DA)
		ethflow |= RXH_L2DA;
	if (flow_key & FLOW_KEY_VLAN)
		ethflow |= RXH_VLAN;
	if (flow_key & FLOW_KEY_IPSA)
		ethflow |= RXH_IP_SRC;
	if (flow_key & FLOW_KEY_IPDA)
		ethflow |= RXH_IP_DST;
	if (flow_key & FLOW_KEY_PROTO)
		ethflow |= RXH_L3_PROTO;
	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
		ethflow |= RXH_L4_B_0_1;
	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
		ethflow |= RXH_L4_B_2_3;

	return ethflow;
}
static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
{
	u64 key = 0;

	if (ethflow & RXH_DEV_PORT)
		key |= FLOW_KEY_PORT;
	if (ethflow & RXH_L2DA)
		key |= FLOW_KEY_L2DA;
	if (ethflow & RXH_VLAN)
		key |= FLOW_KEY_VLAN;
	if (ethflow & RXH_IP_SRC)
		key |= FLOW_KEY_IPSA;
	if (ethflow & RXH_IP_DST)
		key |= FLOW_KEY_IPDA;
	if (ethflow & RXH_L3_PROTO)
		key |= FLOW_KEY_PROTO;
	if (ethflow & RXH_L4_B_0_1)
		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
	if (ethflow & RXH_L4_B_2_3)
		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);

	*flow_key = key;

	return 1;
}
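/* niu_ethflow_to_flowkey() is the inverse of niu_flowkey_to_ethflow()
 * above; the pair is used by the ethtool rxhash get/set handlers below
 * to translate between the RXH_* bits and the hardware FLOW_KEY
 * format.
 */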
static int niu_get_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct niu *np = netdev_priv(dev);
	u64 class;

	cmd->data = 0;

	if (!niu_ethflow_to_class(cmd->flow_type, &class))
		return -EINVAL;

	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
	    TCAM_KEY_DISC)
		cmd->data = RXH_DISCARD;
	else
		cmd->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
						      CLASS_CODE_USER_PROG1]);
	return 0;
}
static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct niu *np = netdev_priv(dev);
	u64 class;
	u64 flow_key = 0;
	unsigned long flags;

	if (!niu_ethflow_to_class(cmd->flow_type, &class))
		return -EINVAL;

	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	if (cmd->data & RXH_DISCARD) {
		niu_lock_parent(np, flags);
		flow_key = np->parent->tcam_key[class -
					       CLASS_CODE_USER_PROG1];
		flow_key |= TCAM_KEY_DISC;
		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
		niu_unlock_parent(np, flags);
		return 0;
	} else {
		/* Discard was set before, but is not set now */
		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
		    TCAM_KEY_DISC) {
			niu_lock_parent(np, flags);
			flow_key = np->parent->tcam_key[class -
					       CLASS_CODE_USER_PROG1];
			flow_key &= ~TCAM_KEY_DISC;
			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
			     flow_key);
			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
				flow_key;
			niu_unlock_parent(np, flags);
		}
	}

	if (!niu_ethflow_to_flowkey(cmd->data, &flow_key))
		return -EINVAL;

	niu_lock_parent(np, flags);
	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
	niu_unlock_parent(np, flags);

	return 0;
}
static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_xmac_stat_keys[] = {
	{ "tx_frames" },
	{ "tx_bytes" },
	{ "tx_fifo_errors" },
	{ "tx_overflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "tx_underflow_errors" },
	{ "rx_local_faults" },
	{ "rx_remote_faults" },
	{ "rx_link_faults" },
	{ "rx_align_errors" },
	{ "rx_frags" },
	{ "rx_mcasts" },
	{ "rx_bcasts" },
	{ "rx_hist_cnt1" },
	{ "rx_hist_cnt2" },
	{ "rx_hist_cnt3" },
	{ "rx_hist_cnt4" },
	{ "rx_hist_cnt5" },
	{ "rx_hist_cnt6" },
	{ "rx_hist_cnt7" },
	{ "rx_octets" },
	{ "rx_code_violations" },
	{ "rx_len_errors" },
	{ "rx_crc_errors" },
	{ "rx_underflows" },
	{ "rx_overflows" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_bmac_stat_keys[] = {
	{ "tx_underflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "tx_bytes" },
	{ "tx_frames" },
	{ "rx_overflows" },
	{ "rx_frames" },
	{ "rx_align_errors" },
	{ "rx_crc_errors" },
	{ "rx_len_errors" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_rxchan_stat_keys[] = {
	{ "rx_channel" },
	{ "rx_packets" },
	{ "rx_bytes" },
	{ "rx_dropped" },
	{ "rx_errors" },
};

#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_txchan_stat_keys[] = {
	{ "tx_channel" },
	{ "tx_packets" },
	{ "tx_bytes" },
	{ "tx_errors" },
};

#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
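/* The string tables above must stay in the exact order of the
 * corresponding niu_xmac_stats/niu_bmac_stats structures and of the
 * per-ring values filled in by niu_get_ethtool_stats(), which copies
 * the statistics out positionally.
 */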
static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	if (np->flags & NIU_FLAGS_XMAC) {
		memcpy(data, niu_xmac_stat_keys,
		       sizeof(niu_xmac_stat_keys));
		data += sizeof(niu_xmac_stat_keys);
	} else {
		memcpy(data, niu_bmac_stat_keys,
		       sizeof(niu_bmac_stat_keys));
		data += sizeof(niu_bmac_stat_keys);
	}
	for (i = 0; i < np->num_rx_rings; i++) {
		memcpy(data, niu_rxchan_stat_keys,
		       sizeof(niu_rxchan_stat_keys));
		data += sizeof(niu_rxchan_stat_keys);
	}
	for (i = 0; i < np->num_tx_rings; i++) {
		memcpy(data, niu_txchan_stat_keys,
		       sizeof(niu_txchan_stat_keys));
		data += sizeof(niu_txchan_stat_keys);
	}
}
static int niu_get_stats_count(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	return ((np->flags & NIU_FLAGS_XMAC ?
		 NUM_XMAC_STAT_KEYS :
		 NUM_BMAC_STAT_KEYS) +
		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS));
}
static void niu_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct niu *np = netdev_priv(dev);
	int i;

	niu_sync_mac_stats(np);
	if (np->flags & NIU_FLAGS_XMAC) {
		memcpy(data, &np->mac_stats.xmac,
		       sizeof(struct niu_xmac_stats));
		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
	} else {
		memcpy(data, &np->mac_stats.bmac,
		       sizeof(struct niu_bmac_stats));
		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
	}
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		data[0] = rp->rx_channel;
		data[1] = rp->rx_packets;
		data[2] = rp->rx_bytes;
		data[3] = rp->rx_dropped;
		data[4] = rp->rx_errors;
		data += 5;
	}
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		data[0] = rp->tx_channel;
		data[1] = rp->tx_packets;
		data[2] = rp->tx_bytes;
		data[3] = rp->tx_errors;
		data += 4;
	}
}
static u64 niu_led_state_save(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return nr64_mac(XMAC_CONFIG);
	else
		return nr64_mac(BMAC_XIF_CONFIG);
}

static void niu_led_state_restore(struct niu *np, u64 val)
{
	if (np->flags & NIU_FLAGS_XMAC)
		nw64_mac(XMAC_CONFIG, val);
	else
		nw64_mac(BMAC_XIF_CONFIG, val);
}

static void niu_force_led(struct niu *np, int on)
{
	u64 val, reg, bit;

	if (np->flags & NIU_FLAGS_XMAC) {
		reg = XMAC_CONFIG;
		bit = XMAC_CONFIG_FORCE_LED_ON;
	} else {
		reg = BMAC_XIF_CONFIG;
		bit = BMAC_XIF_CONFIG_LINK_LED;
	}

	val = nr64_mac(reg);
	if (on)
		val |= bit;
	else
		val &= ~bit;
	nw64_mac(reg, val);
}
static int niu_phys_id(struct net_device *dev, u32 data)
{
	struct niu *np = netdev_priv(dev);
	u64 orig_led_state;
	int i;

	if (!netif_running(dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	orig_led_state = niu_led_state_save(np);
	for (i = 0; i < (data * 2); i++) {
		int on = ((i % 2) == 0);

		niu_force_led(np, on);

		if (msleep_interruptible(500))
			break;
	}
	niu_led_state_restore(np, orig_led_state);

	return 0;
}
static const struct ethtool_ops niu_ethtool_ops = {
	.get_drvinfo		= niu_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= niu_get_msglevel,
	.set_msglevel		= niu_set_msglevel,
	.get_eeprom_len		= niu_get_eeprom_len,
	.get_eeprom		= niu_get_eeprom,
	.get_settings		= niu_get_settings,
	.set_settings		= niu_set_settings,
	.get_strings		= niu_get_strings,
	.get_stats_count	= niu_get_stats_count,
	.get_ethtool_stats	= niu_get_ethtool_stats,
	.phys_id		= niu_phys_id,
	.get_rxhash		= niu_get_hash_opts,
	.set_rxhash		= niu_set_hash_opts,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
			      int ldg, int ldn)
{
	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
		return -EINVAL;
	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	parent->ldg_map[ldn] = ldg;

	if (np->parent->plat_type == PLAT_TYPE_NIU) {
		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
		 * the firmware, and we're not supposed to change them.
		 * Validate the mapping, because if it's wrong we probably
		 * won't get any interrupts and that's painful to debug.
		 */
		if (nr64(LDG_NUM(ldn)) != ldg) {
			dev_err(np->device, PFX "Port %u, mis-matched "
				"LDG assignment "
				"for ldn %d, should be %d is %llu\n",
				np->port, ldn, ldg,
				(unsigned long long) nr64(LDG_NUM(ldn)));
			return -EINVAL;
		}
	} else
		nw64(LDG_NUM(ldn), ldg);

	return 0;
}

static int niu_set_ldg_timer_res(struct niu *np, int res)
{
	if (res < 0 || res > LDG_TIMER_RES_VAL)
		return -EINVAL;

	nw64(LDG_TIMER_RES, res);

	return 0;
}

static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
{
	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
	    (func < 0 || func > 3) ||
	    (vector < 0 || vector > 0x1f))
		return -EINVAL;

	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);

	return 0;
}
static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
{
	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
	int limit;

	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
		return -EINVAL;

	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	limit = 64;
	do {
		udelay(5);
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -ENODEV;
	}

	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	limit = 64;
	do {
		udelay(5);
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -ENODEV;
	}

	frame = nr64(ESPC_PIO_STAT);
	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
}

static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
{
	int err = niu_pci_eeprom_read(np, off);
	u16 val;

	if (err < 0)
		return err;
	val = (err << 8);
	err = niu_pci_eeprom_read(np, off + 1);
	if (err < 0)
		return err;
	val |= (err & 0xff);

	return val;
}

static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
{
	int err = niu_pci_eeprom_read(np, off);
	u16 val;

	if (err < 0)
		return err;
	val = (err & 0xff);
	err = niu_pci_eeprom_read(np, off + 1);
	if (err < 0)
		return err;
	val |= (err & 0xff) << 8;

	return val;
}
niu_pci_vpd_get_propname(struct niu
*np
,
7226 for (i
= 0; i
< namebuf_len
; i
++) {
7227 int err
= niu_pci_eeprom_read(np
, off
+ i
);
7234 if (i
>= namebuf_len
)
7240 static void __devinit
niu_vpd_parse_version(struct niu
*np
)
7242 struct niu_vpd
*vpd
= &np
->vpd
;
7243 int len
= strlen(vpd
->version
) + 1;
7244 const char *s
= vpd
->version
;
7247 for (i
= 0; i
< len
- 5; i
++) {
7248 if (!strncmp(s
+ i
, "FCode ", 5))
7255 sscanf(s
, "%d.%d", &vpd
->fcode_major
, &vpd
->fcode_minor
);
7257 niudbg(PROBE
, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
7258 vpd
->fcode_major
, vpd
->fcode_minor
);
7259 if (vpd
->fcode_major
> NIU_VPD_MIN_MAJOR
||
7260 (vpd
->fcode_major
== NIU_VPD_MIN_MAJOR
&&
7261 vpd
->fcode_minor
>= NIU_VPD_MIN_MINOR
))
7262 np
->flags
|= NIU_FLAGS_VPD_VALID
;
/* ESPC_PIO_EN_ENABLE must be set */
static int __devinit niu_pci_vpd_scan_props(struct niu *np,
					    u32 start, u32 end)
{
	unsigned int found_mask = 0;
#define FOUND_MASK_MODEL	0x00000001
#define FOUND_MASK_BMODEL	0x00000002
#define FOUND_MASK_VERS		0x00000004
#define FOUND_MASK_MAC		0x00000008
#define FOUND_MASK_NMAC		0x00000010
#define FOUND_MASK_PHY		0x00000020
#define FOUND_MASK_ALL		0x0000003f

	niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n",
	       start, end);
	while (start < end) {
		int len, err, instance, type, prop_len;
		char namebuf[64];
		u8 *prop_buf;
		int max_len;

		if (found_mask == FOUND_MASK_ALL) {
			niu_vpd_parse_version(np);
			return 1;
		}

		err = niu_pci_eeprom_read(np, start + 2);
		if (err < 0)
			return err;
		len = err;
		start += 3;

		instance = niu_pci_eeprom_read(np, start);
		type = niu_pci_eeprom_read(np, start + 3);
		prop_len = niu_pci_eeprom_read(np, start + 4);
		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
		if (err < 0)
			return err;

		prop_buf = NULL;
		max_len = 0;
		if (!strcmp(namebuf, "model")) {
			prop_buf = np->vpd.model;
			max_len = NIU_VPD_MODEL_MAX;
			found_mask |= FOUND_MASK_MODEL;
		} else if (!strcmp(namebuf, "board-model")) {
			prop_buf = np->vpd.board_model;
			max_len = NIU_VPD_BD_MODEL_MAX;
			found_mask |= FOUND_MASK_BMODEL;
		} else if (!strcmp(namebuf, "version")) {
			prop_buf = np->vpd.version;
			max_len = NIU_VPD_VERSION_MAX;
			found_mask |= FOUND_MASK_VERS;
		} else if (!strcmp(namebuf, "local-mac-address")) {
			prop_buf = np->vpd.local_mac;
			max_len = ETH_ALEN;
			found_mask |= FOUND_MASK_MAC;
		} else if (!strcmp(namebuf, "num-mac-addresses")) {
			prop_buf = &np->vpd.mac_num;
			max_len = 1;
			found_mask |= FOUND_MASK_NMAC;
		} else if (!strcmp(namebuf, "phy-type")) {
			prop_buf = np->vpd.phy_type;
			max_len = NIU_VPD_PHY_TYPE_MAX;
			found_mask |= FOUND_MASK_PHY;
		}

		if (max_len && prop_len > max_len) {
			dev_err(np->device, PFX "Property '%s' length (%d) is "
				"too long.\n", namebuf, prop_len);
			return -EINVAL;
		}

		if (prop_buf) {
			u32 off = start + 5 + err;
			int i;

			niudbg(PROBE, "VPD_SCAN: Reading in property [%s] "
			       "len[%d]\n", namebuf, prop_len);
			for (i = 0; i < prop_len; i++)
				*prop_buf++ = niu_pci_eeprom_read(np, off + i);
		}

		start += len;
	}

	return 0;
}
/* ESPC_PIO_EN_ENABLE must be set */
static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
{
	u32 offset;
	int err;

	err = niu_pci_eeprom_read16_swp(np, start + 1);
	if (err < 0)
		return;

	offset = err + 3;

	while (start + offset < ESPC_EEPROM_SIZE) {
		u32 here = start + offset;
		u32 end;

		err = niu_pci_eeprom_read(np, here);
		if (err != 0x90)
			return;

		err = niu_pci_eeprom_read16_swp(np, here + 1);
		if (err < 0)
			return;

		here = start + offset + 3;
		end = start + offset + err;

		offset += err;

		err = niu_pci_vpd_scan_props(np, here, end);
		if (err < 0 || err == 1)
			return;
	}
}
/* ESPC_PIO_EN_ENABLE must be set */
static u32 __devinit niu_pci_vpd_offset(struct niu *np)
{
	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
	int err;

	while (start < end) {
		ret = start;

		/* ROM header signature? */
		err = niu_pci_eeprom_read16(np, start + 0);
		if (err != 0x55aa)
			return 0;

		/* Apply offset to PCI data structure. */
		err = niu_pci_eeprom_read16(np, start + 23);
		if (err < 0)
			return 0;
		start += err;

		/* Check for "PCIR" signature. */
		err = niu_pci_eeprom_read16(np, start + 0);
		if (err != 0x5043)
			return 0;
		err = niu_pci_eeprom_read16(np, start + 2);
		if (err != 0x4952)
			return 0;

		/* Check for OBP image type. */
		err = niu_pci_eeprom_read(np, start + 20);
		if (err < 0)
			return 0;
		if (err != 0x01) {
			err = niu_pci_eeprom_read(np, ret + 2);
			if (err < 0)
				return 0;

			start = ret + (err * 512);
			continue;
		}

		err = niu_pci_eeprom_read16_swp(np, start + 8);
		if (err < 0)
			return err;
		ret += err;

		err = niu_pci_eeprom_read(np, ret + 0);
		if (err != 0x82)
			return 0;

		return ret;
	}

	return 0;
}
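/* The "phy-type" VPD property found by the scan above is a short
 * string; the decoder below maps it onto the NIU_FLAGS_10G,
 * NIU_FLAGS_FIBER, and NIU_FLAGS_XCVR_SERDES flags plus the MAC
 * transceiver mode for the port.
 */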
static int __devinit niu_phy_type_prop_decode(struct niu *np,
					      const char *phy_prop)
{
	if (!strcmp(phy_prop, "mif")) {
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
	} else if (!strcmp(phy_prop, "xgf")) {
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "pcs")) {
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
	} else if (!strcmp(phy_prop, "xgc")) {
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
		/* 10G Serdes or 1G Serdes, default to 10G */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else {
		return -EINVAL;
	}
	return 0;
}
static int niu_pci_vpd_get_nports(struct niu *np)
{
	int ports = 0;

	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
		ports = 4;
	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
		ports = 2;
	}

	return ports;
}
static void __devinit niu_pci_vpd_validate(struct niu *np)
{
	struct net_device *dev = np->dev;
	struct niu_vpd *vpd = &np->vpd;
	u8 val8;

	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
		dev_err(np->device, PFX "VPD MAC invalid, "
			"falling back to SPROM.\n");

		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_PCS;
		if (np->port > 1) {
			np->flags |= NIU_FLAGS_FIBER;
			np->flags &= ~NIU_FLAGS_10G;
		}
		if (np->flags & NIU_FLAGS_10G)
			np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, PFX "Illegal phy string [%s].\n",
			np->vpd.phy_type);
		dev_err(np->device, PFX "Falling back to SPROM.\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);

	val8 = dev->perm_addr[5];
	dev->perm_addr[5] += np->port;
	if (dev->perm_addr[5] < val8)
		dev->perm_addr[4]++;

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
}
static int __devinit niu_pci_probe_sprom(struct niu *np)
{
	struct net_device *dev = np->dev;
	int len, i;
	u64 val, sum;
	u8 val8;

	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
	len = val / 4;

	np->eeprom_len = len;

	niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val);

	sum = 0;
	for (i = 0; i < len; i++) {
		val = nr64(ESPC_NCR(i));
		sum += (val >>  0) & 0xff;
		sum += (val >>  8) & 0xff;
		sum += (val >> 16) & 0xff;
		sum += (val >> 24) & 0xff;
	}
	niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff));
	if ((sum & 0xff) != 0xab) {
		dev_err(np->device, PFX "Bad SPROM checksum "
			"(%x, should be 0xab)\n", (int) (sum & 0xff));
		return -EINVAL;
	}

	val = nr64(ESPC_PHY_TYPE);
	switch (np->port) {
	case 0:
		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
			ESPC_PHY_TYPE_PORT0_SHIFT;
		break;
	case 1:
		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
			ESPC_PHY_TYPE_PORT1_SHIFT;
		break;
	case 2:
		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
			ESPC_PHY_TYPE_PORT2_SHIFT;
		break;
	case 3:
		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
			ESPC_PHY_TYPE_PORT3_SHIFT;
		break;
	default:
		dev_err(np->device, PFX "Bogus port number %u\n",
			np->port);
		return -EINVAL;
	}
	niudbg(PROBE, "SPROM: PHY type %x\n", val8);

	switch (val8) {
	case ESPC_PHY_TYPE_1G_COPPER:
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
		break;

	case ESPC_PHY_TYPE_1G_FIBER:
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
		break;

	case ESPC_PHY_TYPE_10G_COPPER:
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	case ESPC_PHY_TYPE_10G_FIBER:
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	default:
		dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8);
		return -EINVAL;
	}

	val = nr64(ESPC_MAC_ADDR0);
	niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n",
	       (unsigned long long) val);
	dev->perm_addr[0] = (val >>  0) & 0xff;
	dev->perm_addr[1] = (val >>  8) & 0xff;
	dev->perm_addr[2] = (val >> 16) & 0xff;
	dev->perm_addr[3] = (val >> 24) & 0xff;

	val = nr64(ESPC_MAC_ADDR1);
	niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n",
	       (unsigned long long) val);
	dev->perm_addr[4] = (val >>  0) & 0xff;
	dev->perm_addr[5] = (val >>  8) & 0xff;

	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		dev_err(np->device, PFX "SPROM MAC address invalid\n");
		dev_err(np->device, PFX "[ \n");
		for (i = 0; i < 6; i++)
			printk("%02x ", dev->perm_addr[i]);
		printk("]\n");
		return -EINVAL;
	}

	val8 = dev->perm_addr[5];
	dev->perm_addr[5] += np->port;
	if (dev->perm_addr[5] < val8)
		dev->perm_addr[4]++;

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);

	val = nr64(ESPC_MOD_STR_LEN);
	niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n",
	       (unsigned long long) val);
	if (val >= 8 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));

		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.model[val] = '\0';

	val = nr64(ESPC_BD_MOD_STR_LEN);
	niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n",
	       (unsigned long long) val);
	if (val >= 4 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));

		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.board_model[val] = '\0';

	np->vpd.mac_num =
		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
	niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n",
	       np->vpd.mac_num);

	return 0;
}
static int __devinit niu_get_and_validate_port(struct niu *np)
{
	struct niu_parent *parent = np->parent;

	if (np->port <= 1)
		np->flags |= NIU_FLAGS_XMAC;

	if (!parent->num_ports) {
		if (parent->plat_type == PLAT_TYPE_NIU) {
			parent->num_ports = 2;
		} else {
			parent->num_ports = niu_pci_vpd_get_nports(np);
			if (!parent->num_ports) {
				/* Fall back to SPROM as last resort.
				 * This will fail on most cards.
				 */
				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
					ESPC_NUM_PORTS_MACS_VAL;

				/* All of the current probing methods fail on
				 * Maramba on-board parts.
				 */
				if (!parent->num_ports)
					parent->num_ports = 4;
			}
		}
	}

	niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
	       np->port, parent->num_ports);
	if (np->port >= parent->num_ports)
		return -ENODEV;

	return 0;
}
static int __devinit phy_record(struct niu_parent *parent,
				struct phy_probe_info *p,
				int dev_id_1, int dev_id_2, u8 phy_port,
				int type)
{
	u32 id = (dev_id_1 << 16) | dev_id_2;
	int idx;

	if (dev_id_1 < 0 || dev_id_2 < 0)
		return 0;
	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
			return 0;
	} else {
		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
			return 0;
	}

	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
		parent->index, id,
		(type == PHY_TYPE_PMA_PMD ?
		 "PMA/PMD" :
		 (type == PHY_TYPE_PCS ?
		  "PCS" : "MII")),
		phy_port);

	if (p->cur[type] >= NIU_MAX_PORTS) {
		printk(KERN_ERR PFX "Too many PHY ports.\n");
		return -EINVAL;
	}
	idx = p->cur[type];
	p->phy_id[type][idx] = id;
	p->phy_port[type][idx] = phy_port;
	p->cur[type] = idx + 1;

	return 0;
}
static int __devinit port_has_10g(struct phy_probe_info *p, int port)
{
	int i;

	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
			return 1;
	}
	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
		if (p->phy_port[PHY_TYPE_PCS][i] == port)
			return 1;
	}

	return 0;
}

static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
{
	int port, cnt;

	cnt = 0;
	*lowest = 32;
	for (port = 8; port < 32; port++) {
		if (port_has_10g(p, port)) {
			if (!cnt)
				*lowest = port;
			cnt++;
		}
	}

	return cnt;
}

static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
{
	*lowest = 32;
	if (p->cur[PHY_TYPE_MII])
		*lowest = p->phy_port[PHY_TYPE_MII][0];

	return p->cur[PHY_TYPE_MII];
}
static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
{
	int num_ports = parent->num_ports;
	int i;

	for (i = 0; i < num_ports; i++) {
		parent->rxchan_per_port[i] = (16 / num_ports);
		parent->txchan_per_port[i] = (16 / num_ports);

		pr_info(PFX "niu%d: Port %u [%u RX chans] "
			"[%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
	}
}
static void __devinit niu_divide_channels(struct niu_parent *parent,
					  int num_10g, int num_1g)
{
	int num_ports = parent->num_ports;
	int rx_chans_per_10g, rx_chans_per_1g;
	int tx_chans_per_10g, tx_chans_per_1g;
	int i, tot_rx, tot_tx;

	if (!num_10g || !num_1g) {
		rx_chans_per_10g = rx_chans_per_1g =
			(NIU_NUM_RXCHAN / num_ports);
		tx_chans_per_10g = tx_chans_per_1g =
			(NIU_NUM_TXCHAN / num_ports);
	} else {
		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
		rx_chans_per_10g = (NIU_NUM_RXCHAN -
				    (rx_chans_per_1g * num_1g)) /
			num_10g;

		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
		tx_chans_per_10g = (NIU_NUM_TXCHAN -
				    (tx_chans_per_1g * num_1g)) /
			num_10g;
	}

	tot_rx = tot_tx = 0;
	for (i = 0; i < num_ports; i++) {
		int type = phy_decode(parent->port_phy, i);

		if (type == PORT_TYPE_10G) {
			parent->rxchan_per_port[i] = rx_chans_per_10g;
			parent->txchan_per_port[i] = tx_chans_per_10g;
		} else {
			parent->rxchan_per_port[i] = rx_chans_per_1g;
			parent->txchan_per_port[i] = tx_chans_per_1g;
		}
		pr_info(PFX "niu%d: Port %u [%u RX chans] "
			"[%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
		tot_rx += parent->rxchan_per_port[i];
		tot_tx += parent->txchan_per_port[i];
	}

	if (tot_rx > NIU_NUM_RXCHAN) {
		printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), "
		       "resetting to one per port.\n",
		       parent->index, tot_rx);
		for (i = 0; i < num_ports; i++)
			parent->rxchan_per_port[i] = 1;
	}
	if (tot_tx > NIU_NUM_TXCHAN) {
		printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), "
		       "resetting to one per port.\n",
		       parent->index, tot_tx);
		for (i = 0; i < num_ports; i++)
			parent->txchan_per_port[i] = 1;
	}
	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
		printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, "
		       "RX[%d] TX[%d]\n",
		       parent->index, tot_rx, tot_tx);
	}
}
static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
					    int num_10g, int num_1g)
{
	int i, num_ports = parent->num_ports;
	int rdc_group, rdc_groups_per_port;
	int rdc_channel_base;

	rdc_group = 0;
	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;

	rdc_channel_base = 0;

	for (i = 0; i < num_ports; i++) {
		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
		int grp, num_channels = parent->rxchan_per_port[i];
		int this_channel_offset;

		tp->first_table_num = rdc_group;
		tp->num_tables = rdc_groups_per_port;
		this_channel_offset = 0;
		for (grp = 0; grp < tp->num_tables; grp++) {
			struct rdc_table *rt = &tp->tables[grp];
			int slot;

			pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ",
				parent->index, i, tp->first_table_num + grp);
			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
				rt->rxdma_channel[slot] =
					rdc_channel_base + this_channel_offset;

				printk("%d ", rt->rxdma_channel[slot]);

				if (++this_channel_offset == num_channels)
					this_channel_offset = 0;
			}
			printk("]\n");
		}

		parent->rdc_default[i] = rdc_channel_base;

		rdc_channel_base += num_channels;
		rdc_group += rdc_groups_per_port;
	}
}
static int __devinit fill_phy_probe_info(struct niu *np,
					 struct niu_parent *parent,
					 struct phy_probe_info *info)
{
	unsigned long flags;
	int port, err;

	memset(info, 0, sizeof(*info));

	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */
	niu_lock_parent(np, flags);
	err = 0;
	for (port = 8; port < 32; port++) {
		int dev_id_1, dev_id_2;

		dev_id_1 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PMA_PMD);
		if (err)
			break;
		dev_id_1 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PCS);
		if (err)
			break;
		dev_id_1 = mii_read(np, port, MII_PHYSID1);
		dev_id_2 = mii_read(np, port, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_MII);
		if (err)
			break;
	}
	niu_unlock_parent(np, flags);

	return err;
}
static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
{
	struct phy_probe_info *info = &parent->phy_probe_info;
	int lowest_10g, lowest_1g;
	int num_10g, num_1g;
	u32 val;
	int err;

	num_10g = num_1g = 0;

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		num_10g = 0;
		num_1g = 2;
		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
		parent->num_ports = 4;
		val = (phy_encode(PORT_TYPE_1G, 0) |
		       phy_encode(PORT_TYPE_1G, 1) |
		       phy_encode(PORT_TYPE_1G, 2) |
		       phy_encode(PORT_TYPE_1G, 3));
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		num_10g = 2;
		num_1g = 0;
		parent->num_ports = 2;
		val = (phy_encode(PORT_TYPE_10G, 0) |
		       phy_encode(PORT_TYPE_10G, 1));
	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
		   (parent->plat_type == PLAT_TYPE_NIU)) {
		/* this is the Monza case */
		if (np->flags & NIU_FLAGS_10G) {
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
		} else {
			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1));
		}
	} else {
		err = fill_phy_probe_info(np, parent, info);
		if (err)
			return err;

		num_10g = count_10g_ports(info, &lowest_10g);
		num_1g = count_1g_ports(info, &lowest_1g);

		switch ((num_10g << 4) | num_1g) {
		case 0x24:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x22:
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x20:
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
			break;

		case 0x10:
			val = phy_encode(PORT_TYPE_10G, np->port);
			break;

		case 0x14:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x13:
			if ((lowest_10g & 0x7) == 0)
				val = (phy_encode(PORT_TYPE_10G, 0) |
				       phy_encode(PORT_TYPE_1G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			else
				val = (phy_encode(PORT_TYPE_1G, 0) |
				       phy_encode(PORT_TYPE_10G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x04:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		default:
			printk(KERN_ERR PFX "Unsupported port config "
			       "10G[%d] 1G[%d]\n",
			       num_10g, num_1g);
			return -EINVAL;
		}
	}

	parent->port_phy = val;

	if (parent->plat_type == PLAT_TYPE_NIU)
		niu_n2_divide_channels(parent);
	else
		niu_divide_channels(parent, num_10g, num_1g);

	niu_divide_rdc_groups(parent, num_10g, num_1g);

	return 0;

unknown_vg_1g_port:
	printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n",
	       lowest_1g);
	return -EINVAL;
}
static int __devinit niu_probe_ports(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, i;

	niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
	       parent->port_phy);

	if (parent->port_phy == PORT_PHY_UNKNOWN) {
		err = walk_phys(np, parent);
		if (err)
			return err;

		niu_set_ldg_timer_res(np, 2);
		for (i = 0; i <= LDN_MAX; i++)
			niu_ldn_irq_enable(np, i, 0);
	}

	if (parent->port_phy == PORT_PHY_INVALID)
		return -EINVAL;

	return 0;
}

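/* Illustrative sketch, not part of the driver: walk_phys() above packs
 * one port-type value per port into parent->port_phy.  Assuming the
 * niu.h helpers phy_encode()/phy_decode() use a 4-bit nibble per port
 * (as show_port_phy() further below relies on), a 2x10G + 2x1G layout
 * decodes like this:
 */
#if 0
static void example_port_phy_layout(void)
{
	u32 val = (phy_encode(PORT_TYPE_10G, 0) |
		   phy_encode(PORT_TYPE_10G, 1) |
		   phy_encode(PORT_TYPE_1G, 2) |
		   phy_encode(PORT_TYPE_1G, 3));
	int i;

	for (i = 0; i < 4; i++)
		pr_info("port %d -> type %d\n", i, phy_decode(val, i));
}
#endif
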
static int __devinit niu_classifier_swstate_init(struct niu *np)
{
	struct niu_classifier *cp = &np->clas;

	niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
	       np->parent->tcam_num_entries);

	cp->tcam_index = (u16) np->port;
	cp->h1_init = 0xffffffff;
	cp->h2_init = 0xffff;

	return fflp_early_init(np);
}

static void __devinit niu_link_config_init(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = (ADVERTISED_10baseT_Half |
			   ADVERTISED_10baseT_Full |
			   ADVERTISED_100baseT_Half |
			   ADVERTISED_100baseT_Full |
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full |
			   ADVERTISED_10000baseT_Full |
			   ADVERTISED_Autoneg);
	lp->speed = lp->active_speed = SPEED_INVALID;
	lp->duplex = lp->active_duplex = DUPLEX_INVALID;
#if 0
	lp->loopback_mode = LOOPBACK_MAC;
	lp->active_speed = SPEED_10000;
	lp->active_duplex = DUPLEX_FULL;
#else
	lp->loopback_mode = LOOPBACK_DISABLED;
#endif
}

static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
{
	switch (np->port) {
	case 0:
		np->mac_regs = np->regs + XMAC_PORT0_OFF;
		np->ipp_off  = 0x00000;
		np->pcs_off  = 0x04000;
		np->xpcs_off = 0x02000;
		break;

	case 1:
		np->mac_regs = np->regs + XMAC_PORT1_OFF;
		np->ipp_off  = 0x08000;
		np->pcs_off  = 0x0a000;
		np->xpcs_off = 0x08000;
		break;

	case 2:
		np->mac_regs = np->regs + BMAC_PORT2_OFF;
		np->ipp_off  = 0x04000;
		np->pcs_off  = 0x0e000;
		np->xpcs_off = ~0UL;
		break;

	case 3:
		np->mac_regs = np->regs + BMAC_PORT3_OFF;
		np->ipp_off  = 0x0c000;
		np->pcs_off  = 0x12000;
		np->xpcs_off = ~0UL;
		break;

	default:
		dev_err(np->device, PFX "Port %u is invalid, cannot "
			"compute MAC block offset.\n", np->port);
		return -EINVAL;
	}

	return 0;
}

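/* Sketch, not driver code: with the per-port offsets above in place,
 * the IPP/PCS/XPCS accessor macros defined near the top of this file
 * resolve to flat MMIO addresses; e.g. an IPP register on port 1 lives
 * at np->regs + 0x08000 + reg:
 */
#if 0
static u64 example_read_ipp(struct niu *np, unsigned long reg)
{
	return nr64_ipp(reg);	/* reads np->regs + np->ipp_off + reg */
}
#endif
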
static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
{
	struct msix_entry msi_vec[NIU_NUM_LDG];
	struct niu_parent *parent = np->parent;
	struct pci_dev *pdev = np->pdev;
	int i, num_irqs, err;
	u8 first_ldg;

	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
		ldg_num_map[i] = first_ldg + i;

	num_irqs = (parent->rxchan_per_port[np->port] +
		    parent->txchan_per_port[np->port] +
		    (np->port == 0 ? 3 : 1));
	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));

retry:
	for (i = 0; i < num_irqs; i++) {
		msi_vec[i].vector = 0;
		msi_vec[i].entry = i;
	}

	err = pci_enable_msix(pdev, msi_vec, num_irqs);
	if (err < 0) {
		np->flags &= ~NIU_FLAGS_MSIX;
		return;
	}
	if (err > 0) {
		num_irqs = err;
		goto retry;
	}

	np->flags |= NIU_FLAGS_MSIX;
	for (i = 0; i < num_irqs; i++)
		np->ldg[i].irq = msi_vec[i].vector;
	np->num_ldg = num_irqs;
}

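/* Sketch, not driver code: the vector count requested above mirrors
 * this helper.  Every port needs one LDG per RX and per TX channel
 * plus one for its MAC; port 0 additionally services MIF and SYSERR,
 * hence the "+ 3" instead of "+ 1".
 */
#if 0
static int example_num_irqs(struct niu_parent *parent, int port)
{
	return parent->rxchan_per_port[port] +
	       parent->txchan_per_port[port] +
	       (port == 0 ? 3 : 1);
}
#endif
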
static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
{
#ifdef CONFIG_SPARC64
	struct of_device *op = np->op;
	const u32 *int_prop;
	int i;

	int_prop = of_get_property(op->node, "interrupts", NULL);
	if (!int_prop)
		return -ENODEV;

	for (i = 0; i < op->num_irqs; i++) {
		ldg_num_map[i] = int_prop[i];
		np->ldg[i].irq = op->irqs[i];
	}

	np->num_ldg = op->num_irqs;

	return 0;
#else
	return -EINVAL;
#endif
}

static int __devinit niu_ldg_init(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	u8 ldg_num_map[NIU_NUM_LDG];
	int first_chan, num_chan;
	int i, err, ldg_rotor;
	u8 port;

	np->num_ldg = 1;
	np->ldg[0].irq = np->dev->irq;
	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_n2_irq_init(np, ldg_num_map);
		if (err)
			return err;
	} else
		niu_try_msix(np, ldg_num_map);

	port = np->port;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);

		lp->np = np;
		lp->ldg_num = ldg_num_map[i];
		lp->timer = 2; /* XXX */

		/* On N2 NIU the firmware has setup the SID mappings so they go
		 * to the correct values that will route the LDG to the proper
		 * interrupt in the NCU interrupt table.
		 */
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
			if (err)
				return err;
		}
	}

	/* We adopt the LDG assignment ordering used by the N2 NIU
	 * 'interrupt' properties because that simplifies a lot of
	 * things.  This ordering is:
	 *
	 *	MAC
	 *	MIF	(if port zero)
	 *	SYSERR	(if port zero)
	 *	RX channels
	 *	TX channels
	 */

	ldg_rotor = 0;

	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
				 LDN_MAC(port));
	if (err)
		return err;

	ldg_rotor++;
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;

	if (port == 0) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_MIF);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;

		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_DEVICE_ERROR);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->rxchan_per_port[i];
	num_chan = parent->rxchan_per_port[port];

	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_RXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->txchan_per_port[i];
	num_chan = parent->txchan_per_port[port];
	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_TXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	return 0;
}

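/* Sketch, not driver code: the ldg_rotor bookkeeping above is a plain
 * round-robin over the LDGs this port owns; once every LDG has an LDN
 * assigned, further LDNs start sharing groups:
 */
#if 0
static u8 example_next_ldg(const u8 *ldg_num_map, int *rotor, int num_ldg)
{
	u8 ldg = ldg_num_map[*rotor];

	if (++(*rotor) == num_ldg)
		*rotor = 0;
	return ldg;
}
#endif
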
static void __devexit niu_ldg_free(struct niu *np)
{
	if (np->flags & NIU_FLAGS_MSIX)
		pci_disable_msix(np->pdev);
}

static int __devinit niu_get_of_props(struct niu *np)
{
#ifdef CONFIG_SPARC64
	struct net_device *dev = np->dev;
	struct device_node *dp;
	const char *phy_type;
	const u8 *mac_addr;
	const char *model;
	int prop_len;

	if (np->parent->plat_type == PLAT_TYPE_NIU)
		dp = np->op->node;
	else
		dp = pci_device_to_OF_node(np->pdev);

	phy_type = of_get_property(dp, "phy-type", &prop_len);
	if (!phy_type) {
		dev_err(np->device, PFX "%s: OF node lacks "
			"phy-type property\n",
			dp->full_name);
		return -EINVAL;
	}

	if (!strcmp(phy_type, "none"))
		return -ENODEV;

	strcpy(np->vpd.phy_type, phy_type);

	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
			dp->full_name, np->vpd.phy_type);
		return -EINVAL;
	}

	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		dev_err(np->device, PFX "%s: OF node lacks "
			"local-mac-address property\n",
			dp->full_name);
		return -EINVAL;
	}
	if (prop_len != dev->addr_len) {
		dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
			"is wrong.\n",
			dp->full_name, prop_len);
	}
	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		int i;

		dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
			dp->full_name);
		dev_err(np->device, PFX "%s: [ \n",
			dp->full_name);
		for (i = 0; i < 6; i++)
			printk("%02x ", dev->perm_addr[i]);
		printk("]\n");
		return -EINVAL;
	}

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);

	model = of_get_property(dp, "model", &prop_len);

	if (model)
		strcpy(np->vpd.model, model);

	return 0;
#else
	return -EINVAL;
#endif
}

static int __devinit niu_get_invariants(struct niu *np)
{
	int err, have_props;
	u32 offset;

	err = niu_get_of_props(np);
	if (err == -ENODEV)
		return err;

	have_props = !err;

	err = niu_init_mac_ipp_pcs_base(np);
	if (err)
		return err;

	if (have_props) {
		err = niu_get_and_validate_port(np);
		if (err)
			return err;
	} else {
		if (np->parent->plat_type == PLAT_TYPE_NIU)
			return -EINVAL;

		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
		offset = niu_pci_vpd_offset(np);
		niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
		       offset);
		if (offset)
			niu_pci_vpd_fetch(np, offset);
		nw64(ESPC_PIO_EN, 0);

		if (np->flags & NIU_FLAGS_VPD_VALID) {
			niu_pci_vpd_validate(np);
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
		}

		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
			err = niu_pci_probe_sprom(np);
			if (err)
				return err;
		}
	}

	err = niu_probe_ports(np);
	if (err)
		return err;

	niu_ldg_init(np);

	niu_classifier_swstate_init(np);
	niu_link_config_init(np);

	err = niu_determine_phy_disposition(np);
	if (!err)
		err = niu_init_link(np);

	return err;
}

static LIST_HEAD(niu_parent_list);
static DEFINE_MUTEX(niu_parent_lock);
static int niu_parent_index;

static ssize_t show_port_phy(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	u32 port_phy = p->port_phy;
	char *orig_buf = buf;
	int i;

	if (port_phy == PORT_PHY_UNKNOWN ||
	    port_phy == PORT_PHY_INVALID)
		return 0;

	for (i = 0; i < p->num_ports; i++) {
		const char *type_str;
		int type;

		type = phy_decode(port_phy, i);
		if (type == PORT_TYPE_10G)
			type_str = "10G";
		else
			type_str = "1G";
		buf += sprintf(buf,
			       (i == 0) ? "%s" : " %s",
			       type_str);
	}
	buf += sprintf(buf, "\n");
	return buf - orig_buf;
}

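/* Example output (illustrative; the exact sysfs path depends on the
 * platform bus and the parent index): for the 2x10G + 2x1G layout
 * built in walk_phys(), reading this attribute from user space would
 * print one token per port:
 *
 *	$ cat /sys/devices/platform/niu.0/port_phy
 *	10G 10G 1G 1G
 */
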
static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	const char *type_str;

	switch (p->plat_type) {
	case PLAT_TYPE_ATLAS:
		type_str = "atlas";
		break;
	case PLAT_TYPE_NIU:
		type_str = "niu";
		break;
	case PLAT_TYPE_VF_P0:
		type_str = "vf_p0";
		break;
	case PLAT_TYPE_VF_P1:
		type_str = "vf_p1";
		break;
	default:
		type_str = "unknown";
		break;
	}

	return sprintf(buf, "%s\n", type_str);
}

static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf,
				    int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	char *orig_buf = buf;
	u8 *arr;
	int i;

	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}

static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 0);
}

static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;

	return sprintf(buf, "%d\n", p->num_ports);
}

static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
	{}
};

static struct niu_parent * __devinit niu_new_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct platform_device *plat_dev;
	struct niu_parent *p;
	int i;

	niudbg(PROBE, "niu_new_parent: Creating new parent.\n");

	plat_dev = platform_device_register_simple("niu", niu_parent_index,
						   NULL, 0);
	if (IS_ERR(plat_dev))
		return NULL;

	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
		int err = device_create_file(&plat_dev->dev,
					     &niu_parent_attributes[i]);
		if (err)
			goto fail_unregister;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail_unregister;

	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
				      FLOW_KEY_IPDA |
				      FLOW_KEY_PROTO |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_0_SHIFT) |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_1_SHIFT));
	}

	for (i = 0; i < LDN_MAX + 1; i++)
		p->ldg_map[i] = LDG_INVALID;

	return p;

fail_unregister:
	platform_device_unregister(plat_dev);
	return NULL;
}

static struct niu_parent * __devinit niu_get_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
	       ptype, port);

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id, ptype);

	if (p) {
		char port_name[6];
		int err;

		sprintf(port_name, "port%d", port);
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
					&np->device->kobj,
					port_name);
		if (!err) {
			p->ports[port] = np;
			atomic_inc(&p->refcnt);
		}
	}
	mutex_unlock(&niu_parent_lock);

	return p;
}

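/* Sketch, not driver code: parents are shared per physical chip and
 * refcounted, so every successful niu_get_parent() must eventually be
 * balanced by niu_put_parent() (defined just below), which unregisters
 * the platform device on the last put:
 */
#if 0
static int example_parent_lifetime(struct niu *np, union niu_parent_id *id)
{
	np->parent = niu_get_parent(np, id, PLAT_TYPE_ATLAS);
	if (!np->parent)
		return -ENOMEM;
	/* ... use the shared parent state ... */
	niu_put_parent(np);
	return 0;
}
#endif
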
static void niu_put_parent(struct niu *np)
{
	struct niu_parent *p = np->parent;
	u8 port = np->port;
	char port_name[6];

	BUG_ON(!p || p->ports[port] != np);

	niudbg(PROBE, "niu_put_parent: port[%u]\n", port);

	sprintf(port_name, "port%d", port);

	mutex_lock(&niu_parent_lock);

	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

	p->ports[port] = NULL;
	np->parent = NULL;

	if (atomic_dec_and_test(&p->refcnt)) {
		list_del(&p->list);
		platform_device_unregister(p->plat_dev);
	}

	mutex_unlock(&niu_parent_lock);
}

static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
				    u64 *handle, gfp_t flag)
{
	dma_addr_t dh;
	void *ret;

	ret = dma_alloc_coherent(dev, size, &dh, flag);
	if (ret)
		*handle = dh;
	return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
				  void *cpu_addr, u64 handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
			       size_t size, enum dma_data_direction direction)
{
	dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
			      size_t size,
			      enum dma_data_direction direction)
{
	return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
				 size_t size,
				 enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
	.alloc_coherent	= niu_pci_alloc_coherent,
	.free_coherent	= niu_pci_free_coherent,
	.map_page	= niu_pci_map_page,
	.unmap_page	= niu_pci_unmap_page,
	.map_single	= niu_pci_map_single,
	.unmap_single	= niu_pci_unmap_single,
};

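/* Sketch, not driver code: the rest of the driver reaches DMA only
 * through np->ops, so identical ring-management code runs over the
 * generic DMA API here and over bare physical addresses via
 * niu_phys_ops on the SPARC N2 path further below:
 */
#if 0
static void *example_alloc_ring(struct niu *np, size_t size, u64 *handle)
{
	return np->ops->alloc_coherent(np->device, size, handle, GFP_KERNEL);
}
#endif
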
static void __devinit niu_driver_version(void)
{
	static int niu_version_printed;

	if (niu_version_printed++ == 0)
		pr_info("%s", version);
}

static struct net_device * __devinit niu_alloc_and_init(
	struct device *gen_dev, struct pci_dev *pdev,
	struct of_device *op, const struct niu_ops *ops,
	u8 port)
{
	struct net_device *dev;
	struct niu *np;

	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
	if (!dev) {
		dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, gen_dev);

	np = netdev_priv(dev);
	np->dev = dev;
	np->pdev = pdev;
	np->op = op;
	np->device = gen_dev;
	np->ops = ops;

	np->msg_enable = niu_debug;

	spin_lock_init(&np->lock);
	INIT_WORK(&np->reset_task, niu_reset_task);

	np->port = port;

	return dev;
}

static const struct net_device_ops niu_netdev_ops = {
	.ndo_open		= niu_open,
	.ndo_stop		= niu_close,
	.ndo_start_xmit		= niu_start_xmit,
	.ndo_get_stats		= niu_get_stats,
	.ndo_set_multicast_list	= niu_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= niu_set_mac_addr,
	.ndo_do_ioctl		= niu_ioctl,
	.ndo_tx_timeout		= niu_tx_timeout,
	.ndo_change_mtu		= niu_change_mtu,
};

static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &niu_netdev_ops;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
}

static void __devinit niu_device_announce(struct niu *np)
{
	struct net_device *dev = np->dev;

	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
			 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
			  "COPPER")),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
}

static int __devinit niu_pci_init_one(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	int err, pos;
	u64 dma_mask;
	u16 val16;

	niu_driver_version();

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
			"base addresses, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_out_disable_pdev;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		err = -ENODEV;
		goto err_out_free_res;
	}

	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.pci.domain = pci_domain_nr(pdev->bus);
	parent_id.pci.bus = pdev->bus->number;
	parent_id.pci.device = PCI_SLOT(pdev->devfn);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_ATLAS);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	val16 |= (PCI_EXP_DEVCTL_CERE |
		  PCI_EXP_DEVCTL_NFERE |
		  PCI_EXP_DEVCTL_FERE |
		  PCI_EXP_DEVCTL_URRE |
		  PCI_EXP_DEVCTL_RELAX_EN);
	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);

	dma_mask = DMA_44BIT_MASK;
	err = pci_set_dma_mask(pdev, dma_mask);
	if (!err) {
		dev->features |= NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, dma_mask);
		if (err) {
			dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
				"DMA for consistent allocations, "
				"aborting.\n");
			goto err_out_release_parent;
		}
	}
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, PFX "No usable DMA configuration, "
				"aborting.\n");
			goto err_out_release_parent;
		}
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	np->regs = pci_ioremap_bar(pdev, 0);
	if (!np->regs) {
		dev_err(&pdev->dev, PFX "Cannot map device registers, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	dev->irq = pdev->irq;

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev, PFX "Problem fetching invariants "
				"of chip, aborting.\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot register net device, "
			"aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->regs) {
		iounmap(np->regs);
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}

static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->regs) {
			iounmap(np->regs);
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);

		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	niu_netif_stop(np);

	del_timer_sync(&np->timer);

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_interrupts(np, 0);
	spin_unlock_irqrestore(&np->lock, flags);

	netif_device_detach(dev);

	spin_lock_irqsave(&np->lock, flags);
	niu_stop_hw(np);
	spin_unlock_irqrestore(&np->lock, flags);

	pci_save_state(pdev);

	return 0;
}

static int niu_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);

	netif_device_attach(dev);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

static struct pci_driver niu_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= niu_pci_tbl,
	.probe		= niu_pci_init_one,
	.remove		= __devexit_p(niu_pci_remove_one),
	.suspend	= niu_suspend,
	.resume		= niu_resume,
};

#ifdef CONFIG_SPARC64
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
				     u64 *dma_addr, gfp_t flag)
{
	unsigned long order = get_order(size);
	unsigned long page = __get_free_pages(flag, order);

	if (page == 0UL)
		return NULL;
	memset((char *)page, 0, PAGE_SIZE << order);
	*dma_addr = __pa(page);

	return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
				   void *cpu_addr, u64 handle)
{
	unsigned long order = get_order(size);

	free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
				size_t size, enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
			       size_t size,
			       enum dma_data_direction direction)
{
	return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static const struct niu_ops niu_phys_ops = {
	.alloc_coherent	= niu_phys_alloc_coherent,
	.free_coherent	= niu_phys_free_coherent,
	.map_page	= niu_phys_map_page,
	.unmap_page	= niu_phys_unmap_page,
	.map_single	= niu_phys_map_single,
	.unmap_single	= niu_phys_unmap_single,
};

static unsigned long res_size(struct resource *r)
{
	return r->end - r->start + 1UL;
}

static int __devinit niu_of_probe(struct of_device *op,
				  const struct of_device_id *match)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	const u32 *reg;
	int err;

	niu_driver_version();

	reg = of_get_property(op->node, "reg", NULL);
	if (!reg) {
		dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
			op->node->full_name);
		return -ENODEV;
	}

	dev = niu_alloc_and_init(&op->dev, NULL, op,
				 &niu_phys_ops, reg[0] & 0x1);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.of = of_get_parent(op->node);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_NIU);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	np->regs = of_ioremap(&op->resource[1], 0,
			      res_size(&op->resource[1]),
			      "niu regs");
	if (!np->regs) {
		dev_err(&op->dev, PFX "Cannot map device registers, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
				    res_size(&op->resource[2]),
				    "niu vregs-1");
	if (!np->vir_regs_1) {
		dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
				    res_size(&op->resource[3]),
				    "niu vregs-2");
	if (!np->vir_regs_2) {
		dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&op->dev, PFX "Problem fetching invariants "
				"of chip, aborting.\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&op->dev, PFX "Cannot register net device, "
			"aborting.\n");
		goto err_out_iounmap;
	}

	dev_set_drvdata(&op->dev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->vir_regs_1) {
		of_iounmap(&op->resource[2], np->vir_regs_1,
			   res_size(&op->resource[2]));
		np->vir_regs_1 = NULL;
	}

	if (np->vir_regs_2) {
		of_iounmap(&op->resource[3], np->vir_regs_2,
			   res_size(&op->resource[3]));
		np->vir_regs_2 = NULL;
	}

	if (np->regs) {
		of_iounmap(&op->resource[1], np->regs,
			   res_size(&op->resource[1]));
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int __devexit niu_of_remove(struct of_device *op)
{
	struct net_device *dev = dev_get_drvdata(&op->dev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->vir_regs_1) {
			of_iounmap(&op->resource[2], np->vir_regs_1,
				   res_size(&op->resource[2]));
			np->vir_regs_1 = NULL;
		}

		if (np->vir_regs_2) {
			of_iounmap(&op->resource[3], np->vir_regs_2,
				   res_size(&op->resource[3]));
			np->vir_regs_2 = NULL;
		}

		if (np->regs) {
			of_iounmap(&op->resource[1], np->regs,
				   res_size(&op->resource[1]));
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);
		dev_set_drvdata(&op->dev, NULL);
	}
	return 0;
}

static const struct of_device_id niu_match[] = {
	{
		.name = "network",
		.compatible = "SUNW,niusl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct of_platform_driver niu_of_driver = {
	.name		= "niu",
	.match_table	= niu_match,
	.probe		= niu_of_probe,
	.remove		= __devexit_p(niu_of_remove),
};
#endif /* CONFIG_SPARC64 */

static int __init niu_init(void)
{
	int err = 0;

	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = of_register_driver(&niu_of_driver, &of_bus_type);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			of_unregister_driver(&niu_of_driver);
#endif
	}

	return err;
}

static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	of_unregister_driver(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);