/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>

#include <linux/of_device.h>

#define DRV_MODULE_NAME		"niu"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"Nov 14, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef DMA_44BIT_MASK
#define DMA_44BIT_MASK	0x00000fffffffffffULL
#endif
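/* Note: the readq()/writeq() fallbacks below compose a 64-bit register access
 * out of two 32-bit MMIO accesses, with the low word at offset 0 and the high
 * word at offset 4.  The pair of readl()/writel() calls is not atomic, so
 * callers rely on the hardware register being stable across the two accesses.
 */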
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
static struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
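/* Note on the accessor macros above: all of them expect a local "struct niu
 * *np" to be in scope.  nr64()/nw64() go through the main np->regs mapping,
 * nr64_mac()/nw64_mac() through the separate np->mac_regs mapping, and the
 * IPP/PCS/XPCS variants add the per-block offsets (np->ipp_off, np->pcs_off,
 * np->xpcs_off) on top of np->regs.
 */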
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niudbg(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_DEBUG PFX f, ## a); \
} while (0)

#define niuinfo(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_INFO PFX f, ## a); \
} while (0)

#define niuwarn(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_WARNING PFX f, ## a); \
} while (0)

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)
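/* The niudbg/niuinfo/niuwarn macros gate their printk on np->msg_enable.
 * A hypothetical call site (not taken from this file) would look like:
 *
 *	niudbg(PROBE, "%s: resetting MAC\n", np->dev->name);
 *
 * which only prints when NETIF_MSG_PROBE is set in np->msg_enable.
 */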
static int serdes_init_10g_serdes(struct niu *np);
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}
static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_mac(reg));

	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}
static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_ipp(reg));

	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})
static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64(reg));

	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
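/* Note: the niu_*wait*clear wrapper macros above use a GCC statement
 * expression so that BUILD_BUG_ON() can reject obviously bogus constant
 * LIMIT/DELAY arguments at compile time, before calling the underlying
 * __niu_* helper that actually polls the register until the bits clear.
 */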
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}
static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}
static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i, err = 0;

	for (i = 0; i <= LDN_MAX; i++) {
		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return err;
}
static int niu_enable_interrupts(struct niu *np, int on)
{
	int i, err = 0;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return err;
}
static u32 phy_encode(u32 type, int port)
{
	return (type << (port * 2));
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
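/* Note: the parent's port_phy word packs one small port-type code per port,
 * two bits per port; phy_encode() shifts a type into a port's slot and
 * phy_decode() extracts it again, masking with PORT_TYPE_MASK.
 */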
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}
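/* Note (an interpretation of the code, not a statement from the original
 * authors): mdio_read()/mdio_write() below issue an MDIO_ADDR_OP frame first
 * and then a separate read/write frame addressed by (port, dev), i.e. the
 * two-step, device-addressed MDIO style used for the 10G PHYs, while
 * mii_read()/mii_write() further down issue a single (port, reg) frame for
 * classic MII PHYs.  Every operation is paced by mdio_wait(), which polls
 * MIF_FRAME_OUTPUT for the turnaround bit.
 */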
static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}
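/* Note: the ESR2 PLL TX/RX configuration registers are 32 bits wide but are
 * programmed over MDIO 16 bits at a time, so esr2_set_tx_cfg() and
 * esr2_set_rx_cfg() write the low half to the _L register and the high half
 * to the _H register of the given lane ("channel").
 */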
/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}
static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_1g_serdes: "
			"mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_1g_serdes: "
			"mdio write to ESR2_TI_PLL_STS_L failed", np->port);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}

	return 0;
}
static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_10g_serdes: "
			"mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_10g_serdes: "
			"mdio write to ESR2_TI_PLL_STS_L failed", np->port);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info(PFX "NIU Port %u signal bits [%08x] are not "
			"[%08x] for 10G...trying 1G\n",
			np->port, (int) (sig & mask), (int) val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			dev_err(np->device, PFX "Port %u 10G/1G SERDES "
				"Link Failed \n", np->port);
			return -ENODEV;
		}
	}
	return 0;
}
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}
static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}
static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}
static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		dev_err(np->device, PFX "Port %u ESR_RESET "
			"did not clear [%08x]\n",
			np->port, reset);
		return -ENODEV;
	}

	return 0;
}
780 static int serdes_init_10g(struct niu
*np
)
782 struct niu_link_config
*lp
= &np
->link_config
;
783 unsigned long ctrl_reg
, test_cfg_reg
, i
;
784 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
789 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
790 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
793 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
794 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
800 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
801 ENET_SERDES_CTRL_SDET_1
|
802 ENET_SERDES_CTRL_SDET_2
|
803 ENET_SERDES_CTRL_SDET_3
|
804 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
805 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
806 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
807 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
808 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
809 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
810 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
811 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
814 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
815 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
816 ENET_SERDES_TEST_MD_0_SHIFT
) |
817 (ENET_TEST_MD_PAD_LOOPBACK
<<
818 ENET_SERDES_TEST_MD_1_SHIFT
) |
819 (ENET_TEST_MD_PAD_LOOPBACK
<<
820 ENET_SERDES_TEST_MD_2_SHIFT
) |
821 (ENET_TEST_MD_PAD_LOOPBACK
<<
822 ENET_SERDES_TEST_MD_3_SHIFT
));
825 nw64(ctrl_reg
, ctrl_val
);
826 nw64(test_cfg_reg
, test_cfg_val
);
828 /* Initialize all 4 lanes of the SERDES. */
829 for (i
= 0; i
< 4; i
++) {
830 u32 rxtx_ctrl
, glue0
;
832 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
835 err
= esr_read_glue0(np
, i
, &glue0
);
839 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
840 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
841 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
843 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
844 ESR_GLUE_CTRL0_THCNT
|
845 ESR_GLUE_CTRL0_BLTIME
);
846 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
847 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
848 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
849 (BLTIME_300_CYCLES
<<
850 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
852 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
855 err
= esr_write_glue0(np
, i
, glue0
);
864 sig
= nr64(ESR_INT_SIGNALS
);
867 mask
= ESR_INT_SIGNALS_P0_BITS
;
868 val
= (ESR_INT_SRDY0_P0
|
878 mask
= ESR_INT_SIGNALS_P1_BITS
;
879 val
= (ESR_INT_SRDY0_P1
|
892 if ((sig
& mask
) != val
) {
893 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
894 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
897 dev_err(np
->device
, PFX
"Port %u signal bits [%08x] are not "
898 "[%08x]\n", np
->port
, (int) (sig
& mask
), (int) val
);
901 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
)
902 np
->flags
|= NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
906 static int serdes_init_1g(struct niu
*np
)
910 val
= nr64(ENET_SERDES_1_PLL_CFG
);
911 val
&= ~ENET_SERDES_PLL_FBDIV2
;
914 val
|= ENET_SERDES_PLL_HRATE0
;
917 val
|= ENET_SERDES_PLL_HRATE1
;
920 val
|= ENET_SERDES_PLL_HRATE2
;
923 val
|= ENET_SERDES_PLL_HRATE3
;
928 nw64(ENET_SERDES_1_PLL_CFG
, val
);
933 static int serdes_init_1g_serdes(struct niu
*np
)
935 struct niu_link_config
*lp
= &np
->link_config
;
936 unsigned long ctrl_reg
, test_cfg_reg
, pll_cfg
, i
;
937 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
939 u64 reset_val
, val_rd
;
941 val
= ENET_SERDES_PLL_HRATE0
| ENET_SERDES_PLL_HRATE1
|
942 ENET_SERDES_PLL_HRATE2
| ENET_SERDES_PLL_HRATE3
|
943 ENET_SERDES_PLL_FBDIV0
;
946 reset_val
= ENET_SERDES_RESET_0
;
947 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
948 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
949 pll_cfg
= ENET_SERDES_0_PLL_CFG
;
952 reset_val
= ENET_SERDES_RESET_1
;
953 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
954 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
955 pll_cfg
= ENET_SERDES_1_PLL_CFG
;
961 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
962 ENET_SERDES_CTRL_SDET_1
|
963 ENET_SERDES_CTRL_SDET_2
|
964 ENET_SERDES_CTRL_SDET_3
|
965 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
966 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
967 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
968 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
969 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
970 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
971 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
972 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
975 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
976 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
977 ENET_SERDES_TEST_MD_0_SHIFT
) |
978 (ENET_TEST_MD_PAD_LOOPBACK
<<
979 ENET_SERDES_TEST_MD_1_SHIFT
) |
980 (ENET_TEST_MD_PAD_LOOPBACK
<<
981 ENET_SERDES_TEST_MD_2_SHIFT
) |
982 (ENET_TEST_MD_PAD_LOOPBACK
<<
983 ENET_SERDES_TEST_MD_3_SHIFT
));
986 nw64(ENET_SERDES_RESET
, reset_val
);
988 val_rd
= nr64(ENET_SERDES_RESET
);
989 val_rd
&= ~reset_val
;
991 nw64(ctrl_reg
, ctrl_val
);
992 nw64(test_cfg_reg
, test_cfg_val
);
993 nw64(ENET_SERDES_RESET
, val_rd
);
996 /* Initialize all 4 lanes of the SERDES. */
997 for (i
= 0; i
< 4; i
++) {
998 u32 rxtx_ctrl
, glue0
;
1000 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
1003 err
= esr_read_glue0(np
, i
, &glue0
);
1007 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
1008 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
1009 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
1011 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
1012 ESR_GLUE_CTRL0_THCNT
|
1013 ESR_GLUE_CTRL0_BLTIME
);
1014 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
1015 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
1016 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
1017 (BLTIME_300_CYCLES
<<
1018 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
1020 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
1023 err
= esr_write_glue0(np
, i
, glue0
);
1029 sig
= nr64(ESR_INT_SIGNALS
);
1032 val
= (ESR_INT_SRDY0_P0
| ESR_INT_DET0_P0
);
1037 val
= (ESR_INT_SRDY0_P1
| ESR_INT_DET0_P1
);
1045 if ((sig
& mask
) != val
) {
1046 dev_err(np
->device
, PFX
"Port %u signal bits [%08x] are not "
1047 "[%08x]\n", np
->port
, (int) (sig
& mask
), (int) val
);
1054 static int link_status_1g_serdes(struct niu
*np
, int *link_up_p
)
1056 struct niu_link_config
*lp
= &np
->link_config
;
1060 unsigned long flags
;
1064 current_speed
= SPEED_INVALID
;
1065 current_duplex
= DUPLEX_INVALID
;
1067 spin_lock_irqsave(&np
->lock
, flags
);
1069 val
= nr64_pcs(PCS_MII_STAT
);
1071 if (val
& PCS_MII_STAT_LINK_STATUS
) {
1073 current_speed
= SPEED_1000
;
1074 current_duplex
= DUPLEX_FULL
;
1077 lp
->active_speed
= current_speed
;
1078 lp
->active_duplex
= current_duplex
;
1079 spin_unlock_irqrestore(&np
->lock
, flags
);
1081 *link_up_p
= link_up
;
1085 static int link_status_10g_serdes(struct niu
*np
, int *link_up_p
)
1087 unsigned long flags
;
1088 struct niu_link_config
*lp
= &np
->link_config
;
1095 if (!(np
->flags
& NIU_FLAGS_10G
))
1096 return link_status_1g_serdes(np
, link_up_p
);
1098 current_speed
= SPEED_INVALID
;
1099 current_duplex
= DUPLEX_INVALID
;
1100 spin_lock_irqsave(&np
->lock
, flags
);
1102 val
= nr64_xpcs(XPCS_STATUS(0));
1103 val2
= nr64_mac(XMAC_INTER2
);
1104 if (val2
& 0x01000000)
1107 if ((val
& 0x1000ULL
) && link_ok
) {
1109 current_speed
= SPEED_10000
;
1110 current_duplex
= DUPLEX_FULL
;
1112 lp
->active_speed
= current_speed
;
1113 lp
->active_duplex
= current_duplex
;
1114 spin_unlock_irqrestore(&np
->lock
, flags
);
1115 *link_up_p
= link_up
;
1119 static int link_status_mii(struct niu
*np
, int *link_up_p
)
1121 struct niu_link_config
*lp
= &np
->link_config
;
1123 int bmsr
, advert
, ctrl1000
, stat1000
, lpa
, bmcr
, estatus
;
1124 int supported
, advertising
, active_speed
, active_duplex
;
1126 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1127 if (unlikely(err
< 0))
1131 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1132 if (unlikely(err
< 0))
1136 err
= mii_read(np
, np
->phy_addr
, MII_ADVERTISE
);
1137 if (unlikely(err
< 0))
1141 err
= mii_read(np
, np
->phy_addr
, MII_LPA
);
1142 if (unlikely(err
< 0))
1146 if (likely(bmsr
& BMSR_ESTATEN
)) {
1147 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1148 if (unlikely(err
< 0))
1152 err
= mii_read(np
, np
->phy_addr
, MII_CTRL1000
);
1153 if (unlikely(err
< 0))
1157 err
= mii_read(np
, np
->phy_addr
, MII_STAT1000
);
1158 if (unlikely(err
< 0))
1162 estatus
= ctrl1000
= stat1000
= 0;
1165 if (bmsr
& BMSR_ANEGCAPABLE
)
1166 supported
|= SUPPORTED_Autoneg
;
1167 if (bmsr
& BMSR_10HALF
)
1168 supported
|= SUPPORTED_10baseT_Half
;
1169 if (bmsr
& BMSR_10FULL
)
1170 supported
|= SUPPORTED_10baseT_Full
;
1171 if (bmsr
& BMSR_100HALF
)
1172 supported
|= SUPPORTED_100baseT_Half
;
1173 if (bmsr
& BMSR_100FULL
)
1174 supported
|= SUPPORTED_100baseT_Full
;
1175 if (estatus
& ESTATUS_1000_THALF
)
1176 supported
|= SUPPORTED_1000baseT_Half
;
1177 if (estatus
& ESTATUS_1000_TFULL
)
1178 supported
|= SUPPORTED_1000baseT_Full
;
1179 lp
->supported
= supported
;
1182 if (advert
& ADVERTISE_10HALF
)
1183 advertising
|= ADVERTISED_10baseT_Half
;
1184 if (advert
& ADVERTISE_10FULL
)
1185 advertising
|= ADVERTISED_10baseT_Full
;
1186 if (advert
& ADVERTISE_100HALF
)
1187 advertising
|= ADVERTISED_100baseT_Half
;
1188 if (advert
& ADVERTISE_100FULL
)
1189 advertising
|= ADVERTISED_100baseT_Full
;
1190 if (ctrl1000
& ADVERTISE_1000HALF
)
1191 advertising
|= ADVERTISED_1000baseT_Half
;
1192 if (ctrl1000
& ADVERTISE_1000FULL
)
1193 advertising
|= ADVERTISED_1000baseT_Full
;
1195 if (bmcr
& BMCR_ANENABLE
) {
1198 lp
->active_autoneg
= 1;
1199 advertising
|= ADVERTISED_Autoneg
;
1202 neg1000
= (ctrl1000
<< 2) & stat1000
;
1204 if (neg1000
& (LPA_1000FULL
| LPA_1000HALF
))
1205 active_speed
= SPEED_1000
;
1206 else if (neg
& LPA_100
)
1207 active_speed
= SPEED_100
;
1208 else if (neg
& (LPA_10HALF
| LPA_10FULL
))
1209 active_speed
= SPEED_10
;
1211 active_speed
= SPEED_INVALID
;
1213 if ((neg1000
& LPA_1000FULL
) || (neg
& LPA_DUPLEX
))
1214 active_duplex
= DUPLEX_FULL
;
1215 else if (active_speed
!= SPEED_INVALID
)
1216 active_duplex
= DUPLEX_HALF
;
1218 active_duplex
= DUPLEX_INVALID
;
1220 lp
->active_autoneg
= 0;
1222 if ((bmcr
& BMCR_SPEED1000
) && !(bmcr
& BMCR_SPEED100
))
1223 active_speed
= SPEED_1000
;
1224 else if (bmcr
& BMCR_SPEED100
)
1225 active_speed
= SPEED_100
;
1227 active_speed
= SPEED_10
;
1229 if (bmcr
& BMCR_FULLDPLX
)
1230 active_duplex
= DUPLEX_FULL
;
1232 active_duplex
= DUPLEX_HALF
;
1235 lp
->active_advertising
= advertising
;
1236 lp
->active_speed
= active_speed
;
1237 lp
->active_duplex
= active_duplex
;
1238 *link_up_p
= !!(bmsr
& BMSR_LSTATUS
);
1243 static int link_status_1g_rgmii(struct niu
*np
, int *link_up_p
)
1245 struct niu_link_config
*lp
= &np
->link_config
;
1246 u16 current_speed
, bmsr
;
1247 unsigned long flags
;
1252 current_speed
= SPEED_INVALID
;
1253 current_duplex
= DUPLEX_INVALID
;
1255 spin_lock_irqsave(&np
->lock
, flags
);
1259 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1264 if (bmsr
& BMSR_LSTATUS
) {
1265 u16 adv
, lpa
, common
, estat
;
1267 err
= mii_read(np
, np
->phy_addr
, MII_ADVERTISE
);
1272 err
= mii_read(np
, np
->phy_addr
, MII_LPA
);
1279 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1284 current_speed
= SPEED_1000
;
1285 current_duplex
= DUPLEX_FULL
;
1288 lp
->active_speed
= current_speed
;
1289 lp
->active_duplex
= current_duplex
;
1293 spin_unlock_irqrestore(&np
->lock
, flags
);
1295 *link_up_p
= link_up
;
1299 static int link_status_1g(struct niu
*np
, int *link_up_p
)
1301 struct niu_link_config
*lp
= &np
->link_config
;
1302 unsigned long flags
;
1305 spin_lock_irqsave(&np
->lock
, flags
);
1307 err
= link_status_mii(np
, link_up_p
);
1308 lp
->supported
|= SUPPORTED_TP
;
1309 lp
->active_advertising
|= ADVERTISED_TP
;
1311 spin_unlock_irqrestore(&np
->lock
, flags
);
1315 static int bcm8704_reset(struct niu
*np
)
1319 err
= mdio_read(np
, np
->phy_addr
,
1320 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
1321 if (err
< 0 || err
== 0xffff)
1324 err
= mdio_write(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
1330 while (--limit
>= 0) {
1331 err
= mdio_read(np
, np
->phy_addr
,
1332 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
1335 if (!(err
& BMCR_RESET
))
1339 dev_err(np
->device
, PFX
"Port %u PHY will not reset "
1340 "(bmcr=%04x)\n", np
->port
, (err
& 0xffff));
1346 /* When written, certain PHY registers need to be read back twice
1347 * in order for the bits to settle properly.
1349 static int bcm8704_user_dev3_readback(struct niu
*np
, int reg
)
1351 int err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, reg
);
1354 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, reg
);
1360 static int bcm8706_init_user_dev3(struct niu
*np
)
1365 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1366 BCM8704_USER_OPT_DIGITAL_CTRL
);
1369 err
&= ~USER_ODIG_CTRL_GPIOS
;
1370 err
|= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT
);
1371 err
|= USER_ODIG_CTRL_RESV2
;
1372 err
= mdio_write(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1373 BCM8704_USER_OPT_DIGITAL_CTRL
, err
);
1382 static int bcm8704_init_user_dev3(struct niu
*np
)
1386 err
= mdio_write(np
, np
->phy_addr
,
1387 BCM8704_USER_DEV3_ADDR
, BCM8704_USER_CONTROL
,
1388 (USER_CONTROL_OPTXRST_LVL
|
1389 USER_CONTROL_OPBIASFLT_LVL
|
1390 USER_CONTROL_OBTMPFLT_LVL
|
1391 USER_CONTROL_OPPRFLT_LVL
|
1392 USER_CONTROL_OPTXFLT_LVL
|
1393 USER_CONTROL_OPRXLOS_LVL
|
1394 USER_CONTROL_OPRXFLT_LVL
|
1395 USER_CONTROL_OPTXON_LVL
|
1396 (0x3f << USER_CONTROL_RES1_SHIFT
)));
1400 err
= mdio_write(np
, np
->phy_addr
,
1401 BCM8704_USER_DEV3_ADDR
, BCM8704_USER_PMD_TX_CONTROL
,
1402 (USER_PMD_TX_CTL_XFP_CLKEN
|
1403 (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH
) |
1404 (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH
) |
1405 USER_PMD_TX_CTL_TSCK_LPWREN
));
1409 err
= bcm8704_user_dev3_readback(np
, BCM8704_USER_CONTROL
);
1412 err
= bcm8704_user_dev3_readback(np
, BCM8704_USER_PMD_TX_CONTROL
);
1416 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1417 BCM8704_USER_OPT_DIGITAL_CTRL
);
1420 err
&= ~USER_ODIG_CTRL_GPIOS
;
1421 err
|= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT
);
1422 err
= mdio_write(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1423 BCM8704_USER_OPT_DIGITAL_CTRL
, err
);
1432 static int mrvl88x2011_act_led(struct niu
*np
, int val
)
1436 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1437 MRVL88X2011_LED_8_TO_11_CTL
);
1441 err
&= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT
,MRVL88X2011_LED_CTL_MASK
);
1442 err
|= MRVL88X2011_LED(MRVL88X2011_LED_ACT
,val
);
1444 return mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1445 MRVL88X2011_LED_8_TO_11_CTL
, err
);
1448 static int mrvl88x2011_led_blink_rate(struct niu
*np
, int rate
)
1452 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1453 MRVL88X2011_LED_BLINK_CTL
);
1455 err
&= ~MRVL88X2011_LED_BLKRATE_MASK
;
1458 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1459 MRVL88X2011_LED_BLINK_CTL
, err
);
1465 static int xcvr_init_10g_mrvl88x2011(struct niu
*np
)
1469 /* Set LED functions */
1470 err
= mrvl88x2011_led_blink_rate(np
, MRVL88X2011_LED_BLKRATE_134MS
);
1475 err
= mrvl88x2011_act_led(np
, MRVL88X2011_LED_CTL_OFF
);
1479 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1480 MRVL88X2011_GENERAL_CTL
);
1484 err
|= MRVL88X2011_ENA_XFPREFCLK
;
1486 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1487 MRVL88X2011_GENERAL_CTL
, err
);
1491 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1492 MRVL88X2011_PMA_PMD_CTL_1
);
1496 if (np
->link_config
.loopback_mode
== LOOPBACK_MAC
)
1497 err
|= MRVL88X2011_LOOPBACK
;
1499 err
&= ~MRVL88X2011_LOOPBACK
;
1501 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1502 MRVL88X2011_PMA_PMD_CTL_1
, err
);
1507 return mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1508 MRVL88X2011_10G_PMD_TX_DIS
, MRVL88X2011_ENA_PMDTX
);
1512 static int xcvr_diag_bcm870x(struct niu
*np
)
1514 u16 analog_stat0
, tx_alarm_status
;
1518 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
1522 pr_info(PFX
"Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
1525 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, 0x20);
1528 pr_info(PFX
"Port %u USER_DEV3(0x20) [%04x]\n",
1531 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
1535 pr_info(PFX
"Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
1539 /* XXX dig this out it might not be so useful XXX */
1540 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1541 BCM8704_USER_ANALOG_STATUS0
);
1544 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1545 BCM8704_USER_ANALOG_STATUS0
);
1550 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1551 BCM8704_USER_TX_ALARM_STATUS
);
1554 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1555 BCM8704_USER_TX_ALARM_STATUS
);
1558 tx_alarm_status
= err
;
1560 if (analog_stat0
!= 0x03fc) {
1561 if ((analog_stat0
== 0x43bc) && (tx_alarm_status
!= 0)) {
1562 pr_info(PFX
"Port %u cable not connected "
1563 "or bad cable.\n", np
->port
);
1564 } else if (analog_stat0
== 0x639c) {
1565 pr_info(PFX
"Port %u optical module is bad "
1566 "or missing.\n", np
->port
);
1573 static int xcvr_10g_set_lb_bcm870x(struct niu
*np
)
1575 struct niu_link_config
*lp
= &np
->link_config
;
1578 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1583 err
&= ~BMCR_LOOPBACK
;
1585 if (lp
->loopback_mode
== LOOPBACK_MAC
)
1586 err
|= BMCR_LOOPBACK
;
1588 err
= mdio_write(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1596 static int xcvr_init_10g_bcm8706(struct niu
*np
)
1601 if ((np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) &&
1602 (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) == 0)
1605 val
= nr64_mac(XMAC_CONFIG
);
1606 val
&= ~XMAC_CONFIG_LED_POLARITY
;
1607 val
|= XMAC_CONFIG_FORCE_LED_ON
;
1608 nw64_mac(XMAC_CONFIG
, val
);
1610 val
= nr64(MIF_CONFIG
);
1611 val
|= MIF_CONFIG_INDIRECT_MODE
;
1612 nw64(MIF_CONFIG
, val
);
1614 err
= bcm8704_reset(np
);
1618 err
= xcvr_10g_set_lb_bcm870x(np
);
1622 err
= bcm8706_init_user_dev3(np
);
1626 err
= xcvr_diag_bcm870x(np
);
1633 static int xcvr_init_10g_bcm8704(struct niu
*np
)
1637 err
= bcm8704_reset(np
);
1641 err
= bcm8704_init_user_dev3(np
);
1645 err
= xcvr_10g_set_lb_bcm870x(np
);
1649 err
= xcvr_diag_bcm870x(np
);
1656 static int xcvr_init_10g(struct niu
*np
)
1661 val
= nr64_mac(XMAC_CONFIG
);
1662 val
&= ~XMAC_CONFIG_LED_POLARITY
;
1663 val
|= XMAC_CONFIG_FORCE_LED_ON
;
1664 nw64_mac(XMAC_CONFIG
, val
);
1666 /* XXX shared resource, lock parent XXX */
1667 val
= nr64(MIF_CONFIG
);
1668 val
|= MIF_CONFIG_INDIRECT_MODE
;
1669 nw64(MIF_CONFIG
, val
);
1671 phy_id
= phy_decode(np
->parent
->port_phy
, np
->port
);
1672 phy_id
= np
->parent
->phy_probe_info
.phy_id
[phy_id
][np
->port
];
1674 /* handle different phy types */
1675 switch (phy_id
& NIU_PHY_ID_MASK
) {
1676 case NIU_PHY_ID_MRVL88X2011
:
1677 err
= xcvr_init_10g_mrvl88x2011(np
);
1680 default: /* bcom 8704 */
1681 err
= xcvr_init_10g_bcm8704(np
);
1688 static int mii_reset(struct niu
*np
)
1692 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, BMCR_RESET
);
1697 while (--limit
>= 0) {
1699 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1702 if (!(err
& BMCR_RESET
))
1706 dev_err(np
->device
, PFX
"Port %u MII would not reset, "
1707 "bmcr[%04x]\n", np
->port
, err
);
1714 static int xcvr_init_1g_rgmii(struct niu
*np
)
1718 u16 bmcr
, bmsr
, estat
;
1720 val
= nr64(MIF_CONFIG
);
1721 val
&= ~MIF_CONFIG_INDIRECT_MODE
;
1722 nw64(MIF_CONFIG
, val
);
1724 err
= mii_reset(np
);
1728 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1734 if (bmsr
& BMSR_ESTATEN
) {
1735 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1742 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1746 if (bmsr
& BMSR_ESTATEN
) {
1749 if (estat
& ESTATUS_1000_TFULL
)
1750 ctrl1000
|= ADVERTISE_1000FULL
;
1751 err
= mii_write(np
, np
->phy_addr
, MII_CTRL1000
, ctrl1000
);
1756 bmcr
= (BMCR_SPEED1000
| BMCR_FULLDPLX
);
1758 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1762 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1765 bmcr
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1767 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1774 static int mii_init_common(struct niu
*np
)
1776 struct niu_link_config
*lp
= &np
->link_config
;
1777 u16 bmcr
, bmsr
, adv
, estat
;
1780 err
= mii_reset(np
);
1784 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1790 if (bmsr
& BMSR_ESTATEN
) {
1791 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1798 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1802 if (lp
->loopback_mode
== LOOPBACK_MAC
) {
1803 bmcr
|= BMCR_LOOPBACK
;
1804 if (lp
->active_speed
== SPEED_1000
)
1805 bmcr
|= BMCR_SPEED1000
;
1806 if (lp
->active_duplex
== DUPLEX_FULL
)
1807 bmcr
|= BMCR_FULLDPLX
;
1810 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
1813 aux
= (BCM5464R_AUX_CTL_EXT_LB
|
1814 BCM5464R_AUX_CTL_WRITE_1
);
1815 err
= mii_write(np
, np
->phy_addr
, BCM5464R_AUX_CTL
, aux
);
1823 adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1824 if ((bmsr
& BMSR_10HALF
) &&
1825 (lp
->advertising
& ADVERTISED_10baseT_Half
))
1826 adv
|= ADVERTISE_10HALF
;
1827 if ((bmsr
& BMSR_10FULL
) &&
1828 (lp
->advertising
& ADVERTISED_10baseT_Full
))
1829 adv
|= ADVERTISE_10FULL
;
1830 if ((bmsr
& BMSR_100HALF
) &&
1831 (lp
->advertising
& ADVERTISED_100baseT_Half
))
1832 adv
|= ADVERTISE_100HALF
;
1833 if ((bmsr
& BMSR_100FULL
) &&
1834 (lp
->advertising
& ADVERTISED_100baseT_Full
))
1835 adv
|= ADVERTISE_100FULL
;
1836 err
= mii_write(np
, np
->phy_addr
, MII_ADVERTISE
, adv
);
1840 if (likely(bmsr
& BMSR_ESTATEN
)) {
1842 if ((estat
& ESTATUS_1000_THALF
) &&
1843 (lp
->advertising
& ADVERTISED_1000baseT_Half
))
1844 ctrl1000
|= ADVERTISE_1000HALF
;
1845 if ((estat
& ESTATUS_1000_TFULL
) &&
1846 (lp
->advertising
& ADVERTISED_1000baseT_Full
))
1847 ctrl1000
|= ADVERTISE_1000FULL
;
1848 err
= mii_write(np
, np
->phy_addr
,
1849 MII_CTRL1000
, ctrl1000
);
1854 bmcr
|= (BMCR_ANENABLE
| BMCR_ANRESTART
);
1859 if (lp
->duplex
== DUPLEX_FULL
) {
1860 bmcr
|= BMCR_FULLDPLX
;
1862 } else if (lp
->duplex
== DUPLEX_HALF
)
1867 if (lp
->speed
== SPEED_1000
) {
1868 /* if X-full requested while not supported, or
1869 X-half requested while not supported... */
1870 if ((fulldpx
&& !(estat
& ESTATUS_1000_TFULL
)) ||
1871 (!fulldpx
&& !(estat
& ESTATUS_1000_THALF
)))
1873 bmcr
|= BMCR_SPEED1000
;
1874 } else if (lp
->speed
== SPEED_100
) {
1875 if ((fulldpx
&& !(bmsr
& BMSR_100FULL
)) ||
1876 (!fulldpx
&& !(bmsr
& BMSR_100HALF
)))
1878 bmcr
|= BMCR_SPEED100
;
1879 } else if (lp
->speed
== SPEED_10
) {
1880 if ((fulldpx
&& !(bmsr
& BMSR_10FULL
)) ||
1881 (!fulldpx
&& !(bmsr
& BMSR_10HALF
)))
1887 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1892 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1897 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1902 pr_info(PFX
"Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1903 np
->port
, bmcr
, bmsr
);
1909 static int xcvr_init_1g(struct niu
*np
)
1913 /* XXX shared resource, lock parent XXX */
1914 val
= nr64(MIF_CONFIG
);
1915 val
&= ~MIF_CONFIG_INDIRECT_MODE
;
1916 nw64(MIF_CONFIG
, val
);
1918 return mii_init_common(np
);
1921 static int niu_xcvr_init(struct niu
*np
)
1923 const struct niu_phy_ops
*ops
= np
->phy_ops
;
1928 err
= ops
->xcvr_init(np
);
1933 static int niu_serdes_init(struct niu
*np
)
1935 const struct niu_phy_ops
*ops
= np
->phy_ops
;
1939 if (ops
->serdes_init
)
1940 err
= ops
->serdes_init(np
);
1945 static void niu_init_xif(struct niu
*);
1946 static void niu_handle_led(struct niu
*, int status
);
1948 static int niu_link_status_common(struct niu
*np
, int link_up
)
1950 struct niu_link_config
*lp
= &np
->link_config
;
1951 struct net_device
*dev
= np
->dev
;
1952 unsigned long flags
;
1954 if (!netif_carrier_ok(dev
) && link_up
) {
1955 niuinfo(LINK
, "%s: Link is up at %s, %s duplex\n",
1957 (lp
->active_speed
== SPEED_10000
?
1959 (lp
->active_speed
== SPEED_1000
?
1961 (lp
->active_speed
== SPEED_100
?
1962 "100Mbit/sec" : "10Mbit/sec"))),
1963 (lp
->active_duplex
== DUPLEX_FULL
?
1966 spin_lock_irqsave(&np
->lock
, flags
);
1968 niu_handle_led(np
, 1);
1969 spin_unlock_irqrestore(&np
->lock
, flags
);
1971 netif_carrier_on(dev
);
1972 } else if (netif_carrier_ok(dev
) && !link_up
) {
1973 niuwarn(LINK
, "%s: Link is down\n", dev
->name
);
1974 spin_lock_irqsave(&np
->lock
, flags
);
1975 niu_handle_led(np
, 0);
1976 spin_unlock_irqrestore(&np
->lock
, flags
);
1977 netif_carrier_off(dev
);
1983 static int link_status_10g_mrvl(struct niu
*np
, int *link_up_p
)
1985 int err
, link_up
, pma_status
, pcs_status
;
1989 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1990 MRVL88X2011_10G_PMD_STATUS_2
);
1994 /* Check PMA/PMD Register: 1.0001.2 == 1 */
1995 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1996 MRVL88X2011_PMA_PMD_STATUS_1
);
2000 pma_status
= ((err
& MRVL88X2011_LNK_STATUS_OK
) ? 1 : 0);
2002 /* Check PMC Register : 3.0001.2 == 1: read twice */
2003 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
2004 MRVL88X2011_PMA_PMD_STATUS_1
);
2008 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
2009 MRVL88X2011_PMA_PMD_STATUS_1
);
2013 pcs_status
= ((err
& MRVL88X2011_LNK_STATUS_OK
) ? 1 : 0);
2015 /* Check XGXS Register : 4.0018.[0-3,12] */
2016 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV4_ADDR
,
2017 MRVL88X2011_10G_XGXS_LANE_STAT
);
2021 if (err
== (PHYXS_XGXS_LANE_STAT_ALINGED
| PHYXS_XGXS_LANE_STAT_LANE3
|
2022 PHYXS_XGXS_LANE_STAT_LANE2
| PHYXS_XGXS_LANE_STAT_LANE1
|
2023 PHYXS_XGXS_LANE_STAT_LANE0
| PHYXS_XGXS_LANE_STAT_MAGIC
|
2025 link_up
= (pma_status
&& pcs_status
) ? 1 : 0;
2027 np
->link_config
.active_speed
= SPEED_10000
;
2028 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2031 mrvl88x2011_act_led(np
, (link_up
?
2032 MRVL88X2011_LED_CTL_PCS_ACT
:
2033 MRVL88X2011_LED_CTL_OFF
));
2035 *link_up_p
= link_up
;
2039 static int link_status_10g_bcm8706(struct niu
*np
, int *link_up_p
)
2044 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
2045 BCM8704_PMD_RCV_SIGDET
);
2046 if (err
< 0 || err
== 0xffff)
2048 if (!(err
& PMD_RCV_SIGDET_GLOBAL
)) {
2053 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
2054 BCM8704_PCS_10G_R_STATUS
);
2058 if (!(err
& PCS_10G_R_STATUS_BLK_LOCK
)) {
2063 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
2064 BCM8704_PHYXS_XGXS_LANE_STAT
);
2067 if (err
!= (PHYXS_XGXS_LANE_STAT_ALINGED
|
2068 PHYXS_XGXS_LANE_STAT_MAGIC
|
2069 PHYXS_XGXS_LANE_STAT_PATTEST
|
2070 PHYXS_XGXS_LANE_STAT_LANE3
|
2071 PHYXS_XGXS_LANE_STAT_LANE2
|
2072 PHYXS_XGXS_LANE_STAT_LANE1
|
2073 PHYXS_XGXS_LANE_STAT_LANE0
)) {
2075 np
->link_config
.active_speed
= SPEED_INVALID
;
2076 np
->link_config
.active_duplex
= DUPLEX_INVALID
;
2081 np
->link_config
.active_speed
= SPEED_10000
;
2082 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2086 *link_up_p
= link_up
;
2090 static int link_status_10g_bcom(struct niu
*np
, int *link_up_p
)
2096 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
2097 BCM8704_PMD_RCV_SIGDET
);
2100 if (!(err
& PMD_RCV_SIGDET_GLOBAL
)) {
2105 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
2106 BCM8704_PCS_10G_R_STATUS
);
2109 if (!(err
& PCS_10G_R_STATUS_BLK_LOCK
)) {
2114 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
2115 BCM8704_PHYXS_XGXS_LANE_STAT
);
2119 if (err
!= (PHYXS_XGXS_LANE_STAT_ALINGED
|
2120 PHYXS_XGXS_LANE_STAT_MAGIC
|
2121 PHYXS_XGXS_LANE_STAT_LANE3
|
2122 PHYXS_XGXS_LANE_STAT_LANE2
|
2123 PHYXS_XGXS_LANE_STAT_LANE1
|
2124 PHYXS_XGXS_LANE_STAT_LANE0
)) {
2130 np
->link_config
.active_speed
= SPEED_10000
;
2131 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2135 *link_up_p
= link_up
;
2139 static int link_status_10g(struct niu
*np
, int *link_up_p
)
2141 unsigned long flags
;
2144 spin_lock_irqsave(&np
->lock
, flags
);
2146 if (np
->link_config
.loopback_mode
== LOOPBACK_DISABLED
) {
2149 phy_id
= phy_decode(np
->parent
->port_phy
, np
->port
);
2150 phy_id
= np
->parent
->phy_probe_info
.phy_id
[phy_id
][np
->port
];
2152 /* handle different phy types */
2153 switch (phy_id
& NIU_PHY_ID_MASK
) {
2154 case NIU_PHY_ID_MRVL88X2011
:
2155 err
= link_status_10g_mrvl(np
, link_up_p
);
2158 default: /* bcom 8704 */
2159 err
= link_status_10g_bcom(np
, link_up_p
);
2164 spin_unlock_irqrestore(&np
->lock
, flags
);
2169 static int niu_10g_phy_present(struct niu
*np
)
2173 sig
= nr64(ESR_INT_SIGNALS
);
2176 mask
= ESR_INT_SIGNALS_P0_BITS
;
2177 val
= (ESR_INT_SRDY0_P0
|
2180 ESR_INT_XDP_P0_CH3
|
2181 ESR_INT_XDP_P0_CH2
|
2182 ESR_INT_XDP_P0_CH1
|
2183 ESR_INT_XDP_P0_CH0
);
2187 mask
= ESR_INT_SIGNALS_P1_BITS
;
2188 val
= (ESR_INT_SRDY0_P1
|
2191 ESR_INT_XDP_P1_CH3
|
2192 ESR_INT_XDP_P1_CH2
|
2193 ESR_INT_XDP_P1_CH1
|
2194 ESR_INT_XDP_P1_CH0
);
2201 if ((sig
& mask
) != val
)
2206 static int link_status_10g_hotplug(struct niu
*np
, int *link_up_p
)
2208 unsigned long flags
;
2211 int phy_present_prev
;
2213 spin_lock_irqsave(&np
->lock
, flags
);
2215 if (np
->link_config
.loopback_mode
== LOOPBACK_DISABLED
) {
2216 phy_present_prev
= (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) ?
2218 phy_present
= niu_10g_phy_present(np
);
2219 if (phy_present
!= phy_present_prev
) {
2222 /* A NEM was just plugged in */
2223 np
->flags
|= NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2224 if (np
->phy_ops
->xcvr_init
)
2225 err
= np
->phy_ops
->xcvr_init(np
);
2227 err
= mdio_read(np
, np
->phy_addr
,
2228 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
2229 if (err
== 0xffff) {
2230 /* No mdio, back-to-back XAUI */
2234 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2237 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2239 niuwarn(LINK
, "%s: Hotplug PHY Removed\n",
2244 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) {
2245 err
= link_status_10g_bcm8706(np
, link_up_p
);
2246 if (err
== 0xffff) {
2247 /* No mdio, back-to-back XAUI: it is C10NEM */
2249 np
->link_config
.active_speed
= SPEED_10000
;
2250 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2255 spin_unlock_irqrestore(&np
->lock
, flags
);
2260 static int niu_link_status(struct niu
*np
, int *link_up_p
)
2262 const struct niu_phy_ops
*ops
= np
->phy_ops
;
2266 if (ops
->link_status
)
2267 err
= ops
->link_status(np
, link_up_p
);
2272 static void niu_timer(unsigned long __opaque
)
2274 struct niu
*np
= (struct niu
*) __opaque
;
2278 err
= niu_link_status(np
, &link_up
);
2280 niu_link_status_common(np
, link_up
);
2282 if (netif_carrier_ok(np
->dev
))
2286 np
->timer
.expires
= jiffies
+ off
;
2288 add_timer(&np
->timer
);
2291 static const struct niu_phy_ops phy_ops_10g_serdes
= {
2292 .serdes_init
= serdes_init_10g_serdes
,
2293 .link_status
= link_status_10g_serdes
,
2296 static const struct niu_phy_ops phy_ops_10g_serdes_niu
= {
2297 .serdes_init
= serdes_init_niu_10g_serdes
,
2298 .link_status
= link_status_10g_serdes
,
2301 static const struct niu_phy_ops phy_ops_1g_serdes_niu
= {
2302 .serdes_init
= serdes_init_niu_1g_serdes
,
2303 .link_status
= link_status_1g_serdes
,
2306 static const struct niu_phy_ops phy_ops_1g_rgmii
= {
2307 .xcvr_init
= xcvr_init_1g_rgmii
,
2308 .link_status
= link_status_1g_rgmii
,
2311 static const struct niu_phy_ops phy_ops_10g_fiber_niu
= {
2312 .serdes_init
= serdes_init_niu_10g_fiber
,
2313 .xcvr_init
= xcvr_init_10g
,
2314 .link_status
= link_status_10g
,
2317 static const struct niu_phy_ops phy_ops_10g_fiber
= {
2318 .serdes_init
= serdes_init_10g
,
2319 .xcvr_init
= xcvr_init_10g
,
2320 .link_status
= link_status_10g
,
2323 static const struct niu_phy_ops phy_ops_10g_fiber_hotplug
= {
2324 .serdes_init
= serdes_init_10g
,
2325 .xcvr_init
= xcvr_init_10g_bcm8706
,
2326 .link_status
= link_status_10g_hotplug
,
2329 static const struct niu_phy_ops phy_ops_niu_10g_hotplug
= {
2330 .serdes_init
= serdes_init_niu_10g_fiber
,
2331 .xcvr_init
= xcvr_init_10g_bcm8706
,
2332 .link_status
= link_status_10g_hotplug
,
2335 static const struct niu_phy_ops phy_ops_10g_copper
= {
2336 .serdes_init
= serdes_init_10g
,
2337 .link_status
= link_status_10g
, /* XXX */
2340 static const struct niu_phy_ops phy_ops_1g_fiber
= {
2341 .serdes_init
= serdes_init_1g
,
2342 .xcvr_init
= xcvr_init_1g
,
2343 .link_status
= link_status_1g
,
2346 static const struct niu_phy_ops phy_ops_1g_copper
= {
2347 .xcvr_init
= xcvr_init_1g
,
2348 .link_status
= link_status_1g
,
2351 struct niu_phy_template
{
2352 const struct niu_phy_ops
*ops
;
2356 static const struct niu_phy_template phy_template_niu_10g_fiber
= {
2357 .ops
= &phy_ops_10g_fiber_niu
,
2358 .phy_addr_base
= 16,
2361 static const struct niu_phy_template phy_template_niu_10g_serdes
= {
2362 .ops
= &phy_ops_10g_serdes_niu
,
2366 static const struct niu_phy_template phy_template_niu_1g_serdes
= {
2367 .ops
= &phy_ops_1g_serdes_niu
,
2371 static const struct niu_phy_template phy_template_10g_fiber
= {
2372 .ops
= &phy_ops_10g_fiber
,
2376 static const struct niu_phy_template phy_template_10g_fiber_hotplug
= {
2377 .ops
= &phy_ops_10g_fiber_hotplug
,
2381 static const struct niu_phy_template phy_template_niu_10g_hotplug
= {
2382 .ops
= &phy_ops_niu_10g_hotplug
,
2386 static const struct niu_phy_template phy_template_10g_copper
= {
2387 .ops
= &phy_ops_10g_copper
,
2388 .phy_addr_base
= 10,
2391 static const struct niu_phy_template phy_template_1g_fiber
= {
2392 .ops
= &phy_ops_1g_fiber
,
2396 static const struct niu_phy_template phy_template_1g_copper
= {
2397 .ops
= &phy_ops_1g_copper
,
2401 static const struct niu_phy_template phy_template_1g_rgmii
= {
2402 .ops
= &phy_ops_1g_rgmii
,
2406 static const struct niu_phy_template phy_template_10g_serdes
= {
2407 .ops
= &phy_ops_10g_serdes
,
2411 static int niu_atca_port_num
[4] = {
2415 static int serdes_init_10g_serdes(struct niu
*np
)
2417 struct niu_link_config
*lp
= &np
->link_config
;
2418 unsigned long ctrl_reg
, test_cfg_reg
, pll_cfg
, i
;
2419 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
2424 reset_val
= ENET_SERDES_RESET_0
;
2425 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
2426 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
2427 pll_cfg
= ENET_SERDES_0_PLL_CFG
;
2430 reset_val
= ENET_SERDES_RESET_1
;
2431 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
2432 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
2433 pll_cfg
= ENET_SERDES_1_PLL_CFG
;
2439 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
2440 ENET_SERDES_CTRL_SDET_1
|
2441 ENET_SERDES_CTRL_SDET_2
|
2442 ENET_SERDES_CTRL_SDET_3
|
2443 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
2444 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
2445 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
2446 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
2447 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
2448 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
2449 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
2450 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
2453 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
2454 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
2455 ENET_SERDES_TEST_MD_0_SHIFT
) |
2456 (ENET_TEST_MD_PAD_LOOPBACK
<<
2457 ENET_SERDES_TEST_MD_1_SHIFT
) |
2458 (ENET_TEST_MD_PAD_LOOPBACK
<<
2459 ENET_SERDES_TEST_MD_2_SHIFT
) |
2460 (ENET_TEST_MD_PAD_LOOPBACK
<<
2461 ENET_SERDES_TEST_MD_3_SHIFT
));
2465 nw64(pll_cfg
, ENET_SERDES_PLL_FBDIV2
);
2466 nw64(ctrl_reg
, ctrl_val
);
2467 nw64(test_cfg_reg
, test_cfg_val
);
2469 /* Initialize all 4 lanes of the SERDES. */
2470 for (i
= 0; i
< 4; i
++) {
2471 u32 rxtx_ctrl
, glue0
;
2474 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
2477 err
= esr_read_glue0(np
, i
, &glue0
);
2481 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
2482 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
2483 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
2485 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
2486 ESR_GLUE_CTRL0_THCNT
|
2487 ESR_GLUE_CTRL0_BLTIME
);
2488 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
2489 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
2490 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
2491 (BLTIME_300_CYCLES
<<
2492 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
2494 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
2497 err
= esr_write_glue0(np
, i
, glue0
);
2503 sig
= nr64(ESR_INT_SIGNALS
);
2506 mask
= ESR_INT_SIGNALS_P0_BITS
;
2507 val
= (ESR_INT_SRDY0_P0
|
2510 ESR_INT_XDP_P0_CH3
|
2511 ESR_INT_XDP_P0_CH2
|
2512 ESR_INT_XDP_P0_CH1
|
2513 ESR_INT_XDP_P0_CH0
);
2517 mask
= ESR_INT_SIGNALS_P1_BITS
;
2518 val
= (ESR_INT_SRDY0_P1
|
2521 ESR_INT_XDP_P1_CH3
|
2522 ESR_INT_XDP_P1_CH2
|
2523 ESR_INT_XDP_P1_CH1
|
2524 ESR_INT_XDP_P1_CH0
);
2531 if ((sig
& mask
) != val
) {
2533 err
= serdes_init_1g_serdes(np
);
2535 np
->flags
&= ~NIU_FLAGS_10G
;
2536 np
->mac_xcvr
= MAC_XCVR_PCS
;
2538 dev_err(np
->device
, PFX
"Port %u 10G/1G SERDES Link Failed \n",
2547 static int niu_determine_phy_disposition(struct niu
*np
)
2549 struct niu_parent
*parent
= np
->parent
;
2550 u8 plat_type
= parent
->plat_type
;
2551 const struct niu_phy_template
*tp
;
2552 u32 phy_addr_off
= 0;
2554 if (plat_type
== PLAT_TYPE_NIU
) {
2558 NIU_FLAGS_XCVR_SERDES
)) {
2559 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
2561 tp
= &phy_template_niu_10g_serdes
;
2563 case NIU_FLAGS_XCVR_SERDES
:
2565 tp
= &phy_template_niu_1g_serdes
;
2567 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
2570 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
2571 tp
= &phy_template_niu_10g_hotplug
;
2577 tp
= &phy_template_niu_10g_fiber
;
2578 phy_addr_off
+= np
->port
;
2586 NIU_FLAGS_XCVR_SERDES
)) {
2589 tp
= &phy_template_1g_copper
;
2590 if (plat_type
== PLAT_TYPE_VF_P0
)
2592 else if (plat_type
== PLAT_TYPE_VF_P1
)
2595 phy_addr_off
+= (np
->port
^ 0x3);
2600 tp
= &phy_template_10g_copper
;
2603 case NIU_FLAGS_FIBER
:
2605 tp
= &phy_template_1g_fiber
;
2608 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
2610 tp
= &phy_template_10g_fiber
;
2611 if (plat_type
== PLAT_TYPE_VF_P0
||
2612 plat_type
== PLAT_TYPE_VF_P1
)
2614 phy_addr_off
+= np
->port
;
2615 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
2616 tp
= &phy_template_10g_fiber_hotplug
;
2624 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
2625 case NIU_FLAGS_XCVR_SERDES
| NIU_FLAGS_FIBER
:
2626 case NIU_FLAGS_XCVR_SERDES
:
2630 tp
= &phy_template_10g_serdes
;
2634 tp
= &phy_template_1g_rgmii
;
2640 phy_addr_off
= niu_atca_port_num
[np
->port
];
2648 np
->phy_ops
= tp
->ops
;
2649 np
->phy_addr
= tp
->phy_addr_base
+ phy_addr_off
;
2654 static int niu_init_link(struct niu
*np
)
2656 struct niu_parent
*parent
= np
->parent
;
2659 if (parent
->plat_type
== PLAT_TYPE_NIU
) {
2660 err
= niu_xcvr_init(np
);
2665 err
= niu_serdes_init(np
);
2666 if (err
&& !(np
->flags
& NIU_FLAGS_HOTPLUG_PHY
))
2669 err
= niu_xcvr_init(np
);
2670 if (!err
|| (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
))
2671 niu_link_status(np
, &ignore
);
2675 static void niu_set_primary_mac(struct niu
*np
, unsigned char *addr
)
2677 u16 reg0
= addr
[4] << 8 | addr
[5];
2678 u16 reg1
= addr
[2] << 8 | addr
[3];
2679 u16 reg2
= addr
[0] << 8 | addr
[1];
2681 if (np
->flags
& NIU_FLAGS_XMAC
) {
2682 nw64_mac(XMAC_ADDR0
, reg0
);
2683 nw64_mac(XMAC_ADDR1
, reg1
);
2684 nw64_mac(XMAC_ADDR2
, reg2
);
2686 nw64_mac(BMAC_ADDR0
, reg0
);
2687 nw64_mac(BMAC_ADDR1
, reg1
);
2688 nw64_mac(BMAC_ADDR2
, reg2
);
2692 static int niu_num_alt_addr(struct niu
*np
)
2694 if (np
->flags
& NIU_FLAGS_XMAC
)
2695 return XMAC_NUM_ALT_ADDR
;
2697 return BMAC_NUM_ALT_ADDR
;
2700 static int niu_set_alt_mac(struct niu
*np
, int index
, unsigned char *addr
)
2702 u16 reg0
= addr
[4] << 8 | addr
[5];
2703 u16 reg1
= addr
[2] << 8 | addr
[3];
2704 u16 reg2
= addr
[0] << 8 | addr
[1];
2706 if (index
>= niu_num_alt_addr(np
))
2709 if (np
->flags
& NIU_FLAGS_XMAC
) {
2710 nw64_mac(XMAC_ALT_ADDR0(index
), reg0
);
2711 nw64_mac(XMAC_ALT_ADDR1(index
), reg1
);
2712 nw64_mac(XMAC_ALT_ADDR2(index
), reg2
);
2714 nw64_mac(BMAC_ALT_ADDR0(index
), reg0
);
2715 nw64_mac(BMAC_ALT_ADDR1(index
), reg1
);
2716 nw64_mac(BMAC_ALT_ADDR2(index
), reg2
);
2722 static int niu_enable_alt_mac(struct niu
*np
, int index
, int on
)
2727 if (index
>= niu_num_alt_addr(np
))
2730 if (np
->flags
& NIU_FLAGS_XMAC
) {
2731 reg
= XMAC_ADDR_CMPEN
;
2734 reg
= BMAC_ADDR_CMPEN
;
2735 mask
= 1 << (index
+ 1);
2738 val
= nr64_mac(reg
);
2748 static void __set_rdc_table_num_hw(struct niu
*np
, unsigned long reg
,
2749 int num
, int mac_pref
)
2751 u64 val
= nr64_mac(reg
);
2752 val
&= ~(HOST_INFO_MACRDCTBLN
| HOST_INFO_MPR
);
2755 val
|= HOST_INFO_MPR
;
2759 static int __set_rdc_table_num(struct niu
*np
,
2760 int xmac_index
, int bmac_index
,
2761 int rdc_table_num
, int mac_pref
)
2765 if (rdc_table_num
& ~HOST_INFO_MACRDCTBLN
)
2767 if (np
->flags
& NIU_FLAGS_XMAC
)
2768 reg
= XMAC_HOST_INFO(xmac_index
);
2770 reg
= BMAC_HOST_INFO(bmac_index
);
2771 __set_rdc_table_num_hw(np
, reg
, rdc_table_num
, mac_pref
);
2775 static int niu_set_primary_mac_rdc_table(struct niu
*np
, int table_num
,
2778 return __set_rdc_table_num(np
, 17, 0, table_num
, mac_pref
);
2781 static int niu_set_multicast_mac_rdc_table(struct niu
*np
, int table_num
,
2784 return __set_rdc_table_num(np
, 16, 8, table_num
, mac_pref
);
2787 static int niu_set_alt_mac_rdc_table(struct niu
*np
, int idx
,
2788 int table_num
, int mac_pref
)
2790 if (idx
>= niu_num_alt_addr(np
))
2792 return __set_rdc_table_num(np
, idx
, idx
+ 1, table_num
, mac_pref
);
2795 static u64
vlan_entry_set_parity(u64 reg_val
)
2800 port01_mask
= 0x00ff;
2801 port23_mask
= 0xff00;
2803 if (hweight64(reg_val
& port01_mask
) & 1)
2804 reg_val
|= ENET_VLAN_TBL_PARITY0
;
2806 reg_val
&= ~ENET_VLAN_TBL_PARITY0
;
2808 if (hweight64(reg_val
& port23_mask
) & 1)
2809 reg_val
|= ENET_VLAN_TBL_PARITY1
;
2811 reg_val
&= ~ENET_VLAN_TBL_PARITY1
;
2816 static void vlan_tbl_write(struct niu
*np
, unsigned long index
,
2817 int port
, int vpr
, int rdc_table
)
2819 u64 reg_val
= nr64(ENET_VLAN_TBL(index
));
2821 reg_val
&= ~((ENET_VLAN_TBL_VPR
|
2822 ENET_VLAN_TBL_VLANRDCTBLN
) <<
2823 ENET_VLAN_TBL_SHIFT(port
));
2825 reg_val
|= (ENET_VLAN_TBL_VPR
<<
2826 ENET_VLAN_TBL_SHIFT(port
));
2827 reg_val
|= (rdc_table
<< ENET_VLAN_TBL_SHIFT(port
));
2829 reg_val
= vlan_entry_set_parity(reg_val
);
2831 nw64(ENET_VLAN_TBL(index
), reg_val
);
2834 static void vlan_tbl_clear(struct niu
*np
)
2838 for (i
= 0; i
< ENET_VLAN_TBL_NUM_ENTRIES
; i
++)
2839 nw64(ENET_VLAN_TBL(i
), 0);
2842 static int tcam_wait_bit(struct niu
*np
, u64 bit
)
2846 while (--limit
> 0) {
2847 if (nr64(TCAM_CTL
) & bit
)
2857 static int tcam_flush(struct niu
*np
, int index
)
2859 nw64(TCAM_KEY_0
, 0x00);
2860 nw64(TCAM_KEY_MASK_0
, 0xff);
2861 nw64(TCAM_CTL
, (TCAM_CTL_RWC_TCAM_WRITE
| index
));
2863 return tcam_wait_bit(np
, TCAM_CTL_STAT
);
static int tcam_read(struct niu *np, int index,
		     u64 *key, u64 *mask)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err) {
		key[0] = nr64(TCAM_KEY_0);
		key[1] = nr64(TCAM_KEY_1);
		key[2] = nr64(TCAM_KEY_2);
		key[3] = nr64(TCAM_KEY_3);
		mask[0] = nr64(TCAM_KEY_MASK_0);
		mask[1] = nr64(TCAM_KEY_MASK_1);
		mask[2] = nr64(TCAM_KEY_MASK_2);
		mask[3] = nr64(TCAM_KEY_MASK_3);
	}

	return err;
}

static int tcam_write(struct niu *np, int index,
		      u64 *key, u64 *mask)
{
	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}

static int tcam_assoc_read(struct niu *np, int index, u64 *data)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err)
		*data = nr64(TCAM_KEY_1);

	return err;
}

static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
{
	nw64(TCAM_KEY_1, assoc_data);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
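/* Illustrative sketch, not part of the driver: the helpers above share
 * one indirect-access protocol -- stage data in TCAM_KEY_0..3 and
 * TCAM_KEY_MASK_0..3 (or TCAM_KEY_1 for the associated RAM), kick
 * TCAM_CTL with an opcode plus the entry index, then poll TCAM_CTL_STAT
 * via tcam_wait_bit().  Programming one complete entry is a key/mask
 * write followed by an associated-data write; the values below are
 * made up.
 */
#if 0	/* example only */
static int tcam_program_example(struct niu *np, int index)
{
	u64 key[4] = { 0 }, mask[4] = { 0 };
	u64 assoc_data = 0;	/* hypothetical associated data */
	int err;

	err = tcam_write(np, index, key, mask);
	if (!err)
		err = tcam_assoc_write(np, index, assoc_data);
	return err;
}
#endif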
static void tcam_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_TCAM_DIS;
	else
		val |= FFLP_CFG_1_TCAM_DIS;
	nw64(FFLP_CFG_1, val);
}

static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~(FFLP_CFG_1_FFLPINITDONE |
		 FFLP_CFG_1_CAMLAT |
		 FFLP_CFG_1_CAMRATIO);
	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}
2953 static int tcam_user_eth_class_enable(struct niu
*np
, unsigned long class,
2959 if (class < CLASS_CODE_ETHERTYPE1
||
2960 class > CLASS_CODE_ETHERTYPE2
)
2963 reg
= L2_CLS(class - CLASS_CODE_ETHERTYPE1
);
2975 static int tcam_user_eth_class_set(struct niu
*np
, unsigned long class,
2981 if (class < CLASS_CODE_ETHERTYPE1
||
2982 class > CLASS_CODE_ETHERTYPE2
||
2983 (ether_type
& ~(u64
)0xffff) != 0)
2986 reg
= L2_CLS(class - CLASS_CODE_ETHERTYPE1
);
2988 val
&= ~L2_CLS_ETYPE
;
2989 val
|= (ether_type
<< L2_CLS_ETYPE_SHIFT
);
2996 static int tcam_user_ip_class_enable(struct niu
*np
, unsigned long class,
3002 if (class < CLASS_CODE_USER_PROG1
||
3003 class > CLASS_CODE_USER_PROG4
)
3006 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
3009 val
|= L3_CLS_VALID
;
3011 val
&= ~L3_CLS_VALID
;
3017 static int tcam_user_ip_class_set(struct niu
*np
, unsigned long class,
3018 int ipv6
, u64 protocol_id
,
3019 u64 tos_mask
, u64 tos_val
)
3024 if (class < CLASS_CODE_USER_PROG1
||
3025 class > CLASS_CODE_USER_PROG4
||
3026 (protocol_id
& ~(u64
)0xff) != 0 ||
3027 (tos_mask
& ~(u64
)0xff) != 0 ||
3028 (tos_val
& ~(u64
)0xff) != 0)
3031 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
3033 val
&= ~(L3_CLS_IPVER
| L3_CLS_PID
|
3034 L3_CLS_TOSMASK
| L3_CLS_TOS
);
3036 val
|= L3_CLS_IPVER
;
3037 val
|= (protocol_id
<< L3_CLS_PID_SHIFT
);
3038 val
|= (tos_mask
<< L3_CLS_TOSMASK_SHIFT
);
3039 val
|= (tos_val
<< L3_CLS_TOS_SHIFT
);
static int tcam_early_init(struct niu *np)
{
	unsigned long i;
	int err;

	tcam_enable(np, 0);
	tcam_set_lat_and_ratio(np,
			       DEFAULT_TCAM_LATENCY,
			       DEFAULT_TCAM_ACCESS_RATIO);
	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
		err = tcam_user_eth_class_enable(np, i, 0);
		if (err)
			return err;
	}
	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
		err = tcam_user_ip_class_enable(np, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int tcam_flush_all(struct niu *np)
{
	unsigned long i;

	for (i = 0; i < np->parent->tcam_num_entries; i++) {
		int err = tcam_flush(np, i);
		if (err)
			return err;
	}
	return 0;
}
static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
{
	return ((u64)index | (num_entries == 1 ?
			      HASH_TBL_ADDR_AUTOINC : 0));
}

static int hash_read(struct niu *np, unsigned long partition,
		     unsigned long index, unsigned long num_entries,
		     u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + num_entries > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		data[i] = nr64(HASH_TBL_DATA(partition));

	return 0;
}

static int hash_write(struct niu *np, unsigned long partition,
		      unsigned long index, unsigned long num_entries,
		      u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + (num_entries * 8) > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		nw64(HASH_TBL_DATA(partition), data[i]);

	return 0;
}
static void fflp_reset(struct niu *np)
{
	u64 val;

	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
	udelay(10);
	nw64(FFLP_CFG_1, 0);

	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}

static void fflp_set_timings(struct niu *np)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~FFLP_CFG_1_FFLPINITDONE;
	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

	val = nr64(FCRAM_REF_TMR);
	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
	nw64(FCRAM_REF_TMR, val);
}
static int fflp_set_partition(struct niu *np, u64 partition,
			      u64 mask, u64 base, int enable)
{
	unsigned long reg;
	u64 val;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    (mask & ~(u64)0x1f) != 0 ||
	    (base & ~(u64)0x1f) != 0)
		return -EINVAL;

	reg = FLW_PRT_SEL(partition);

	val = nr64(reg);
	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
	if (enable)
		val |= FLW_PRT_SEL_EXT;
	nw64(reg, val);

	return 0;
}

static int fflp_disable_all_partitions(struct niu *np)
{
	unsigned long i;

	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
		int err = fflp_set_partition(np, 0, 0, 0, 0);
		if (err)
			return err;
	}
	return 0;
}
static void fflp_llcsnap_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val |= FFLP_CFG_1_LLCSNAP;
	else
		val &= ~FFLP_CFG_1_LLCSNAP;
	nw64(FFLP_CFG_1, val);
}

static void fflp_errors_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_ERRORDIS;
	else
		val |= FFLP_CFG_1_ERRORDIS;
	nw64(FFLP_CFG_1, val);
}
static int fflp_hash_clear(struct niu *np)
{
	struct fcram_hash_ipv4 ent;
	unsigned long i;

	/* IPV4 hash entry with valid bit clear, rest is don't care. */
	memset(&ent, 0, sizeof(ent));
	ent.header = HASH_HEADER_EXT;

	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
		if (err)
			return err;
	}
	return 0;
}
3230 static int fflp_early_init(struct niu
*np
)
3232 struct niu_parent
*parent
;
3233 unsigned long flags
;
3236 niu_lock_parent(np
, flags
);
3238 parent
= np
->parent
;
3240 if (!(parent
->flags
& PARENT_FLGS_CLS_HWINIT
)) {
3241 niudbg(PROBE
, "fflp_early_init: Initting hw on port %u\n",
3243 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
3245 fflp_set_timings(np
);
3246 err
= fflp_disable_all_partitions(np
);
3248 niudbg(PROBE
, "fflp_disable_all_partitions "
3249 "failed, err=%d\n", err
);
3254 err
= tcam_early_init(np
);
3256 niudbg(PROBE
, "tcam_early_init failed, err=%d\n",
3260 fflp_llcsnap_enable(np
, 1);
3261 fflp_errors_enable(np
, 0);
3265 err
= tcam_flush_all(np
);
3267 niudbg(PROBE
, "tcam_flush_all failed, err=%d\n",
3271 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
3272 err
= fflp_hash_clear(np
);
3274 niudbg(PROBE
, "fflp_hash_clear failed, "
3282 niudbg(PROBE
, "fflp_early_init: Success\n");
3283 parent
->flags
|= PARENT_FLGS_CLS_HWINIT
;
3286 niu_unlock_parent(np
, flags
);
static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);

	return 0;
}

static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);

	return 0;
}

/* Entries for the ports are interleaved in the TCAM */
static u16 tcam_get_index(struct niu *np, u16 idx)
{
	/* One entry reserved for IP fragment rule */
	if (idx >= (np->clas.tcam_sz - 1))
		idx = 0;
	return (np->clas.tcam_top + ((idx + 1) * np->parent->num_ports));
}

static u16 tcam_get_size(struct niu *np)
{
	/* One entry reserved for IP fragment rule */
	return np->clas.tcam_sz - 1;
}

static u16 tcam_get_valid_entry_cnt(struct niu *np)
{
	/* One entry reserved for IP fragment rule */
	return np->clas.tcam_valid_entries - 1;
}
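/* Illustrative sketch, not part of the driver: tcam_get_index() above
 * interleaves the per-port entries.  Assuming, purely for illustration,
 * tcam_top = 0, num_ports = 4 and idx = 2, the returned hardware slot
 * is 0 + (2 + 1) * 4 = 12; idx 0 maps to slot 4, idx 1 to slot 8, and
 * so on, while the first slot stays reserved for the IP fragment rule
 * installed by niu_set_ip_frag_rule() later in this file.
 */
#if 0	/* example only, hypothetical geometry */
static u16 tcam_index_example(struct niu *np)
{
	return tcam_get_index(np, 2);	/* 0 + (2 + 1) * 4 == 12 */
}
#endif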
static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
			      u32 offset, u32 size)
{
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page = page;
	frag->page_offset = offset;
	frag->size = size;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;

	skb_shinfo(skb)->nr_frags = i + 1;
}

static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
{
	a >>= PAGE_SHIFT;
	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));

	return (a & (MAX_RBR_RING_SIZE - 1));
}

static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
				    struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			break;
		}
	}

	return p;
}

static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
	unsigned int h = niu_hash_rxaddr(rp, base);

	page->index = base;
	page->mapping = (struct address_space *) rp->rxhash[h];
	rp->rxhash[h] = page;
}
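/* Illustrative sketch, not part of the driver: the receive buffer pages
 * above are tracked in a small open hash table keyed by DMA address.
 * niu_hash_page() pushes a page onto its bucket's chain (reusing
 * page->mapping as the next pointer), and niu_find_rxpage() walks that
 * chain comparing page->index with the page-aligned address.  A minimal
 * round trip, with a made-up bus address, looks like this:
 */
#if 0	/* example only */
static void rxpage_lookup_example(struct rx_ring_info *rp, struct page *pg)
{
	u64 dma = 0x12345000ULL;	/* hypothetical bus address */
	struct page **link, *found;

	niu_hash_page(rp, pg, dma);
	found = niu_find_rxpage(rp, dma, &link);	/* found == pg */
}
#endif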
static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
			    gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);

	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		atomic_add(rp->rbr_blocks_per_page - 1,
			   &compound_head(page)->_count);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}

	return 0;
}
static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int index = rp->rbr_index;

	rp->rbr_pending++;
	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
		int err = niu_rbr_add_page(np, rp, mask, index);

		if (unlikely(err)) {
			rp->rbr_pending--;
			return;
		}

		rp->rbr_index += rp->rbr_blocks_per_page;
		BUG_ON(rp->rbr_index > rp->rbr_table_size);
		if (rp->rbr_index == rp->rbr_table_size)
			rp->rbr_index = 0;

		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
			rp->rbr_pending = 0;
		}
	}
}
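/* Illustrative sketch, not part of the driver: niu_rbr_refill() above
 * allocates a fresh page only on every rbr_blocks_per_page-th call,
 * because one page backs several ring blocks, and it writes RBR_KICK
 * only once rbr_pending reaches rbr_kick_thresh.  With hypothetical
 * values rbr_blocks_per_page = 2 and rbr_kick_thresh = 16, a page is
 * added on every second refill and the hardware is kicked after 16
 * buffers have been queued.
 */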
3437 static int niu_rx_pkt_ignore(struct niu
*np
, struct rx_ring_info
*rp
)
3439 unsigned int index
= rp
->rcr_index
;
3444 struct page
*page
, **link
;
3450 val
= le64_to_cpup(&rp
->rcr
[index
]);
3451 addr
= (val
& RCR_ENTRY_PKT_BUF_ADDR
) <<
3452 RCR_ENTRY_PKT_BUF_ADDR_SHIFT
;
3453 page
= niu_find_rxpage(rp
, addr
, &link
);
3455 rcr_size
= rp
->rbr_sizes
[(val
& RCR_ENTRY_PKTBUFSZ
) >>
3456 RCR_ENTRY_PKTBUFSZ_SHIFT
];
3457 if ((page
->index
+ PAGE_SIZE
) - rcr_size
== addr
) {
3458 *link
= (struct page
*) page
->mapping
;
3459 np
->ops
->unmap_page(np
->device
, page
->index
,
3460 PAGE_SIZE
, DMA_FROM_DEVICE
);
3462 page
->mapping
= NULL
;
3464 rp
->rbr_refill_pending
++;
3467 index
= NEXT_RCR(rp
, index
);
3468 if (!(val
& RCR_ENTRY_MULTI
))
3472 rp
->rcr_index
= index
;
3477 static int niu_process_rx_pkt(struct napi_struct
*napi
, struct niu
*np
,
3478 struct rx_ring_info
*rp
)
3480 unsigned int index
= rp
->rcr_index
;
3481 struct sk_buff
*skb
;
3484 skb
= netdev_alloc_skb(np
->dev
, RX_SKB_ALLOC_SIZE
);
3486 return niu_rx_pkt_ignore(np
, rp
);
3490 struct page
*page
, **link
;
3491 u32 rcr_size
, append_size
;
3496 val
= le64_to_cpup(&rp
->rcr
[index
]);
3498 len
= (val
& RCR_ENTRY_L2_LEN
) >>
3499 RCR_ENTRY_L2_LEN_SHIFT
;
3502 addr
= (val
& RCR_ENTRY_PKT_BUF_ADDR
) <<
3503 RCR_ENTRY_PKT_BUF_ADDR_SHIFT
;
3504 page
= niu_find_rxpage(rp
, addr
, &link
);
3506 rcr_size
= rp
->rbr_sizes
[(val
& RCR_ENTRY_PKTBUFSZ
) >>
3507 RCR_ENTRY_PKTBUFSZ_SHIFT
];
3509 off
= addr
& ~PAGE_MASK
;
3510 append_size
= rcr_size
;
3517 ptype
= (val
>> RCR_ENTRY_PKT_TYPE_SHIFT
);
3518 if ((ptype
== RCR_PKT_TYPE_TCP
||
3519 ptype
== RCR_PKT_TYPE_UDP
) &&
3520 !(val
& (RCR_ENTRY_NOPORT
|
3522 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3524 skb
->ip_summed
= CHECKSUM_NONE
;
3526 if (!(val
& RCR_ENTRY_MULTI
))
3527 append_size
= len
- skb
->len
;
3529 niu_rx_skb_append(skb
, page
, off
, append_size
);
3530 if ((page
->index
+ rp
->rbr_block_size
) - rcr_size
== addr
) {
3531 *link
= (struct page
*) page
->mapping
;
3532 np
->ops
->unmap_page(np
->device
, page
->index
,
3533 PAGE_SIZE
, DMA_FROM_DEVICE
);
3535 page
->mapping
= NULL
;
3536 rp
->rbr_refill_pending
++;
3540 index
= NEXT_RCR(rp
, index
);
3541 if (!(val
& RCR_ENTRY_MULTI
))
3545 rp
->rcr_index
= index
;
3547 skb_reserve(skb
, NET_IP_ALIGN
);
3548 __pskb_pull_tail(skb
, min(len
, NIU_RXPULL_MAX
));
3551 rp
->rx_bytes
+= skb
->len
;
3553 skb
->protocol
= eth_type_trans(skb
, np
->dev
);
3554 skb_record_rx_queue(skb
, rp
->rx_channel
);
3555 napi_gro_receive(napi
, skb
);
static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int blocks_per_page = rp->rbr_blocks_per_page;
	int err, index = rp->rbr_index;

	err = 0;
	while (index < (rp->rbr_table_size - blocks_per_page)) {
		err = niu_rbr_add_page(np, rp, mask, index);
		if (err)
			break;

		index += blocks_per_page;
	}

	rp->rbr_index = index;

	return err;
}
3578 static void niu_rbr_free(struct niu
*np
, struct rx_ring_info
*rp
)
3582 for (i
= 0; i
< MAX_RBR_RING_SIZE
; i
++) {
3585 page
= rp
->rxhash
[i
];
3587 struct page
*next
= (struct page
*) page
->mapping
;
3588 u64 base
= page
->index
;
3590 np
->ops
->unmap_page(np
->device
, base
, PAGE_SIZE
,
3593 page
->mapping
= NULL
;
3601 for (i
= 0; i
< rp
->rbr_table_size
; i
++)
3602 rp
->rbr
[i
] = cpu_to_le32(0);
3606 static int release_tx_packet(struct niu
*np
, struct tx_ring_info
*rp
, int idx
)
3608 struct tx_buff_info
*tb
= &rp
->tx_buffs
[idx
];
3609 struct sk_buff
*skb
= tb
->skb
;
3610 struct tx_pkt_hdr
*tp
;
3614 tp
= (struct tx_pkt_hdr
*) skb
->data
;
3615 tx_flags
= le64_to_cpup(&tp
->flags
);
3618 rp
->tx_bytes
+= (((tx_flags
& TXHDR_LEN
) >> TXHDR_LEN_SHIFT
) -
3619 ((tx_flags
& TXHDR_PAD
) / 2));
3621 len
= skb_headlen(skb
);
3622 np
->ops
->unmap_single(np
->device
, tb
->mapping
,
3623 len
, DMA_TO_DEVICE
);
3625 if (le64_to_cpu(rp
->descr
[idx
]) & TX_DESC_MARK
)
3630 idx
= NEXT_TX(rp
, idx
);
3631 len
-= MAX_TX_DESC_LEN
;
3634 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3635 tb
= &rp
->tx_buffs
[idx
];
3636 BUG_ON(tb
->skb
!= NULL
);
3637 np
->ops
->unmap_page(np
->device
, tb
->mapping
,
3638 skb_shinfo(skb
)->frags
[i
].size
,
3640 idx
= NEXT_TX(rp
, idx
);
3648 #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
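/* Illustrative sketch, not part of the driver: niu_tx_work() below uses
 * this threshold to decide when a stopped transmit queue may be woken.
 * Since rp->pending is set to MAX_TX_RING_SIZE when the ring is
 * allocated, the queue is only restarted once more than a quarter of
 * the ring is free again; for a hypothetical 256-entry ring that is 64
 * descriptors.
 */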
3650 static void niu_tx_work(struct niu
*np
, struct tx_ring_info
*rp
)
3652 struct netdev_queue
*txq
;
3657 index
= (rp
- np
->tx_rings
);
3658 txq
= netdev_get_tx_queue(np
->dev
, index
);
3661 if (unlikely(!(cs
& (TX_CS_MK
| TX_CS_MMK
))))
3664 tmp
= pkt_cnt
= (cs
& TX_CS_PKT_CNT
) >> TX_CS_PKT_CNT_SHIFT
;
3665 pkt_cnt
= (pkt_cnt
- rp
->last_pkt_cnt
) &
3666 (TX_CS_PKT_CNT
>> TX_CS_PKT_CNT_SHIFT
);
3668 rp
->last_pkt_cnt
= tmp
;
3672 niudbg(TX_DONE
, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
3673 np
->dev
->name
, pkt_cnt
, cons
);
3676 cons
= release_tx_packet(np
, rp
, cons
);
3682 if (unlikely(netif_tx_queue_stopped(txq
) &&
3683 (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
)))) {
3684 __netif_tx_lock(txq
, smp_processor_id());
3685 if (netif_tx_queue_stopped(txq
) &&
3686 (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
)))
3687 netif_tx_wake_queue(txq
);
3688 __netif_tx_unlock(txq
);
3692 static inline void niu_sync_rx_discard_stats(struct niu
*np
,
3693 struct rx_ring_info
*rp
,
3696 /* This elaborate scheme is needed for reading the RX discard
3697 * counters, as they are only 16-bit and can overflow quickly,
3698 * and because the overflow indication bit is not usable as
3699 * the counter value does not wrap, but remains at max value
3702 * In theory and in practice counters can be lost in between
3703 * reading nr64() and clearing the counter nw64(). For this
3704 * reason, the number of counter clearings nw64() is
3705 * limited/reduced though the limit parameter.
3707 int rx_channel
= rp
->rx_channel
;
3710 /* RXMISC (Receive Miscellaneous Discard Count), covers the
3711 * following discard events: IPP (Input Port Process),
3712 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
3713 * Block Ring) prefetch buffer is empty.
3715 misc
= nr64(RXMISC(rx_channel
));
3716 if (unlikely((misc
& RXMISC_COUNT
) > limit
)) {
3717 nw64(RXMISC(rx_channel
), 0);
3718 rp
->rx_errors
+= misc
& RXMISC_COUNT
;
3720 if (unlikely(misc
& RXMISC_OFLOW
))
3721 dev_err(np
->device
, "rx-%d: Counter overflow "
3722 "RXMISC discard\n", rx_channel
);
3724 niudbg(RX_ERR
, "%s-rx-%d: MISC drop=%u over=%u\n",
3725 np
->dev
->name
, rx_channel
, misc
, misc
-limit
);
3728 /* WRED (Weighted Random Early Discard) by hardware */
3729 wred
= nr64(RED_DIS_CNT(rx_channel
));
3730 if (unlikely((wred
& RED_DIS_CNT_COUNT
) > limit
)) {
3731 nw64(RED_DIS_CNT(rx_channel
), 0);
3732 rp
->rx_dropped
+= wred
& RED_DIS_CNT_COUNT
;
3734 if (unlikely(wred
& RED_DIS_CNT_OFLOW
))
3735 dev_err(np
->device
, "rx-%d: Counter overflow "
3736 "WRED discard\n", rx_channel
);
3738 niudbg(RX_ERR
, "%s-rx-%d: WRED drop=%u over=%u\n",
3739 np
->dev
->name
, rx_channel
, wred
, wred
-limit
);
3743 static int niu_rx_work(struct napi_struct
*napi
, struct niu
*np
,
3744 struct rx_ring_info
*rp
, int budget
)
3746 int qlen
, rcr_done
= 0, work_done
= 0;
3747 struct rxdma_mailbox
*mbox
= rp
->mbox
;
3751 stat
= nr64(RX_DMA_CTL_STAT(rp
->rx_channel
));
3752 qlen
= nr64(RCRSTAT_A(rp
->rx_channel
)) & RCRSTAT_A_QLEN
;
3754 stat
= le64_to_cpup(&mbox
->rx_dma_ctl_stat
);
3755 qlen
= (le64_to_cpup(&mbox
->rcrstat_a
) & RCRSTAT_A_QLEN
);
3757 mbox
->rx_dma_ctl_stat
= 0;
3758 mbox
->rcrstat_a
= 0;
3760 niudbg(RX_STATUS
, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
3761 np
->dev
->name
, rp
->rx_channel
, (unsigned long long) stat
, qlen
);
3763 rcr_done
= work_done
= 0;
3764 qlen
= min(qlen
, budget
);
3765 while (work_done
< qlen
) {
3766 rcr_done
+= niu_process_rx_pkt(napi
, np
, rp
);
3770 if (rp
->rbr_refill_pending
>= rp
->rbr_kick_thresh
) {
3773 for (i
= 0; i
< rp
->rbr_refill_pending
; i
++)
3774 niu_rbr_refill(np
, rp
, GFP_ATOMIC
);
3775 rp
->rbr_refill_pending
= 0;
3778 stat
= (RX_DMA_CTL_STAT_MEX
|
3779 ((u64
)work_done
<< RX_DMA_CTL_STAT_PKTREAD_SHIFT
) |
3780 ((u64
)rcr_done
<< RX_DMA_CTL_STAT_PTRREAD_SHIFT
));
3782 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
), stat
);
3784 /* Only sync discards stats when qlen indicate potential for drops */
3786 niu_sync_rx_discard_stats(np
, rp
, 0x7FFF);
3791 static int niu_poll_core(struct niu
*np
, struct niu_ldg
*lp
, int budget
)
3794 u32 tx_vec
= (v0
>> 32);
3795 u32 rx_vec
= (v0
& 0xffffffff);
3796 int i
, work_done
= 0;
3798 niudbg(INTR
, "%s: niu_poll_core() v0[%016llx]\n",
3799 np
->dev
->name
, (unsigned long long) v0
);
3801 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3802 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3803 if (tx_vec
& (1 << rp
->tx_channel
))
3804 niu_tx_work(np
, rp
);
3805 nw64(LD_IM0(LDN_TXDMA(rp
->tx_channel
)), 0);
3808 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3809 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3811 if (rx_vec
& (1 << rp
->rx_channel
)) {
3814 this_work_done
= niu_rx_work(&lp
->napi
, np
, rp
,
3817 budget
-= this_work_done
;
3818 work_done
+= this_work_done
;
3820 nw64(LD_IM0(LDN_RXDMA(rp
->rx_channel
)), 0);
3826 static int niu_poll(struct napi_struct
*napi
, int budget
)
3828 struct niu_ldg
*lp
= container_of(napi
, struct niu_ldg
, napi
);
3829 struct niu
*np
= lp
->np
;
3832 work_done
= niu_poll_core(np
, lp
, budget
);
3834 if (work_done
< budget
) {
3835 napi_complete(napi
);
3836 niu_ldg_rearm(np
, lp
, 1);
3841 static void niu_log_rxchan_errors(struct niu
*np
, struct rx_ring_info
*rp
,
3844 dev_err(np
->device
, PFX
"%s: RX channel %u errors ( ",
3845 np
->dev
->name
, rp
->rx_channel
);
3847 if (stat
& RX_DMA_CTL_STAT_RBR_TMOUT
)
3848 printk("RBR_TMOUT ");
3849 if (stat
& RX_DMA_CTL_STAT_RSP_CNT_ERR
)
3851 if (stat
& RX_DMA_CTL_STAT_BYTE_EN_BUS
)
3852 printk("BYTE_EN_BUS ");
3853 if (stat
& RX_DMA_CTL_STAT_RSP_DAT_ERR
)
3855 if (stat
& RX_DMA_CTL_STAT_RCR_ACK_ERR
)
3857 if (stat
& RX_DMA_CTL_STAT_RCR_SHA_PAR
)
3858 printk("RCR_SHA_PAR ");
3859 if (stat
& RX_DMA_CTL_STAT_RBR_PRE_PAR
)
3860 printk("RBR_PRE_PAR ");
3861 if (stat
& RX_DMA_CTL_STAT_CONFIG_ERR
)
3863 if (stat
& RX_DMA_CTL_STAT_RCRINCON
)
3864 printk("RCRINCON ");
3865 if (stat
& RX_DMA_CTL_STAT_RCRFULL
)
3867 if (stat
& RX_DMA_CTL_STAT_RBRFULL
)
3869 if (stat
& RX_DMA_CTL_STAT_RBRLOGPAGE
)
3870 printk("RBRLOGPAGE ");
3871 if (stat
& RX_DMA_CTL_STAT_CFIGLOGPAGE
)
3872 printk("CFIGLOGPAGE ");
3873 if (stat
& RX_DMA_CTL_STAT_DC_FIFO_ERR
)
3879 static int niu_rx_error(struct niu
*np
, struct rx_ring_info
*rp
)
3881 u64 stat
= nr64(RX_DMA_CTL_STAT(rp
->rx_channel
));
3885 if (stat
& (RX_DMA_CTL_STAT_CHAN_FATAL
|
3886 RX_DMA_CTL_STAT_PORT_FATAL
))
3890 dev_err(np
->device
, PFX
"%s: RX channel %u error, stat[%llx]\n",
3891 np
->dev
->name
, rp
->rx_channel
,
3892 (unsigned long long) stat
);
3894 niu_log_rxchan_errors(np
, rp
, stat
);
3897 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
3898 stat
& RX_DMA_CTL_WRITE_CLEAR_ERRS
);
3903 static void niu_log_txchan_errors(struct niu
*np
, struct tx_ring_info
*rp
,
3906 dev_err(np
->device
, PFX
"%s: TX channel %u errors ( ",
3907 np
->dev
->name
, rp
->tx_channel
);
3909 if (cs
& TX_CS_MBOX_ERR
)
3911 if (cs
& TX_CS_PKT_SIZE_ERR
)
3912 printk("PKT_SIZE ");
3913 if (cs
& TX_CS_TX_RING_OFLOW
)
3914 printk("TX_RING_OFLOW ");
3915 if (cs
& TX_CS_PREF_BUF_PAR_ERR
)
3916 printk("PREF_BUF_PAR ");
3917 if (cs
& TX_CS_NACK_PREF
)
3918 printk("NACK_PREF ");
3919 if (cs
& TX_CS_NACK_PKT_RD
)
3920 printk("NACK_PKT_RD ");
3921 if (cs
& TX_CS_CONF_PART_ERR
)
3922 printk("CONF_PART ");
3923 if (cs
& TX_CS_PKT_PRT_ERR
)
3929 static int niu_tx_error(struct niu
*np
, struct tx_ring_info
*rp
)
3933 cs
= nr64(TX_CS(rp
->tx_channel
));
3934 logh
= nr64(TX_RNG_ERR_LOGH(rp
->tx_channel
));
3935 logl
= nr64(TX_RNG_ERR_LOGL(rp
->tx_channel
));
3937 dev_err(np
->device
, PFX
"%s: TX channel %u error, "
3938 "cs[%llx] logh[%llx] logl[%llx]\n",
3939 np
->dev
->name
, rp
->tx_channel
,
3940 (unsigned long long) cs
,
3941 (unsigned long long) logh
,
3942 (unsigned long long) logl
);
3944 niu_log_txchan_errors(np
, rp
, cs
);
3949 static int niu_mif_interrupt(struct niu
*np
)
3951 u64 mif_status
= nr64(MIF_STATUS
);
3954 if (np
->flags
& NIU_FLAGS_XMAC
) {
3955 u64 xrxmac_stat
= nr64_mac(XRXMAC_STATUS
);
3957 if (xrxmac_stat
& XRXMAC_STATUS_PHY_MDINT
)
3961 dev_err(np
->device
, PFX
"%s: MIF interrupt, "
3962 "stat[%llx] phy_mdint(%d)\n",
3963 np
->dev
->name
, (unsigned long long) mif_status
, phy_mdint
);
3968 static void niu_xmac_interrupt(struct niu
*np
)
3970 struct niu_xmac_stats
*mp
= &np
->mac_stats
.xmac
;
3973 val
= nr64_mac(XTXMAC_STATUS
);
3974 if (val
& XTXMAC_STATUS_FRAME_CNT_EXP
)
3975 mp
->tx_frames
+= TXMAC_FRM_CNT_COUNT
;
3976 if (val
& XTXMAC_STATUS_BYTE_CNT_EXP
)
3977 mp
->tx_bytes
+= TXMAC_BYTE_CNT_COUNT
;
3978 if (val
& XTXMAC_STATUS_TXFIFO_XFR_ERR
)
3979 mp
->tx_fifo_errors
++;
3980 if (val
& XTXMAC_STATUS_TXMAC_OFLOW
)
3981 mp
->tx_overflow_errors
++;
3982 if (val
& XTXMAC_STATUS_MAX_PSIZE_ERR
)
3983 mp
->tx_max_pkt_size_errors
++;
3984 if (val
& XTXMAC_STATUS_TXMAC_UFLOW
)
3985 mp
->tx_underflow_errors
++;
3987 val
= nr64_mac(XRXMAC_STATUS
);
3988 if (val
& XRXMAC_STATUS_LCL_FLT_STATUS
)
3989 mp
->rx_local_faults
++;
3990 if (val
& XRXMAC_STATUS_RFLT_DET
)
3991 mp
->rx_remote_faults
++;
3992 if (val
& XRXMAC_STATUS_LFLT_CNT_EXP
)
3993 mp
->rx_link_faults
+= LINK_FAULT_CNT_COUNT
;
3994 if (val
& XRXMAC_STATUS_ALIGNERR_CNT_EXP
)
3995 mp
->rx_align_errors
+= RXMAC_ALIGN_ERR_CNT_COUNT
;
3996 if (val
& XRXMAC_STATUS_RXFRAG_CNT_EXP
)
3997 mp
->rx_frags
+= RXMAC_FRAG_CNT_COUNT
;
3998 if (val
& XRXMAC_STATUS_RXMULTF_CNT_EXP
)
3999 mp
->rx_mcasts
+= RXMAC_MC_FRM_CNT_COUNT
;
4000 if (val
& XRXMAC_STATUS_RXBCAST_CNT_EXP
)
4001 mp
->rx_bcasts
+= RXMAC_BC_FRM_CNT_COUNT
;
4002 if (val
& XRXMAC_STATUS_RXBCAST_CNT_EXP
)
4003 mp
->rx_bcasts
+= RXMAC_BC_FRM_CNT_COUNT
;
4004 if (val
& XRXMAC_STATUS_RXHIST1_CNT_EXP
)
4005 mp
->rx_hist_cnt1
+= RXMAC_HIST_CNT1_COUNT
;
4006 if (val
& XRXMAC_STATUS_RXHIST2_CNT_EXP
)
4007 mp
->rx_hist_cnt2
+= RXMAC_HIST_CNT2_COUNT
;
4008 if (val
& XRXMAC_STATUS_RXHIST3_CNT_EXP
)
4009 mp
->rx_hist_cnt3
+= RXMAC_HIST_CNT3_COUNT
;
4010 if (val
& XRXMAC_STATUS_RXHIST4_CNT_EXP
)
4011 mp
->rx_hist_cnt4
+= RXMAC_HIST_CNT4_COUNT
;
4012 if (val
& XRXMAC_STATUS_RXHIST5_CNT_EXP
)
4013 mp
->rx_hist_cnt5
+= RXMAC_HIST_CNT5_COUNT
;
4014 if (val
& XRXMAC_STATUS_RXHIST6_CNT_EXP
)
4015 mp
->rx_hist_cnt6
+= RXMAC_HIST_CNT6_COUNT
;
4016 if (val
& XRXMAC_STATUS_RXHIST7_CNT_EXP
)
4017 mp
->rx_hist_cnt7
+= RXMAC_HIST_CNT7_COUNT
;
4018 if (val
& XRXMAC_STAT_MSK_RXOCTET_CNT_EXP
)
4019 mp
->rx_octets
+= RXMAC_BT_CNT_COUNT
;
4020 if (val
& XRXMAC_STATUS_CVIOLERR_CNT_EXP
)
4021 mp
->rx_code_violations
+= RXMAC_CD_VIO_CNT_COUNT
;
4022 if (val
& XRXMAC_STATUS_LENERR_CNT_EXP
)
4023 mp
->rx_len_errors
+= RXMAC_MPSZER_CNT_COUNT
;
4024 if (val
& XRXMAC_STATUS_CRCERR_CNT_EXP
)
4025 mp
->rx_crc_errors
+= RXMAC_CRC_ER_CNT_COUNT
;
4026 if (val
& XRXMAC_STATUS_RXUFLOW
)
4027 mp
->rx_underflows
++;
4028 if (val
& XRXMAC_STATUS_RXOFLOW
)
4031 val
= nr64_mac(XMAC_FC_STAT
);
4032 if (val
& XMAC_FC_STAT_TX_MAC_NPAUSE
)
4033 mp
->pause_off_state
++;
4034 if (val
& XMAC_FC_STAT_TX_MAC_PAUSE
)
4035 mp
->pause_on_state
++;
4036 if (val
& XMAC_FC_STAT_RX_MAC_RPAUSE
)
4037 mp
->pause_received
++;
4040 static void niu_bmac_interrupt(struct niu
*np
)
4042 struct niu_bmac_stats
*mp
= &np
->mac_stats
.bmac
;
4045 val
= nr64_mac(BTXMAC_STATUS
);
4046 if (val
& BTXMAC_STATUS_UNDERRUN
)
4047 mp
->tx_underflow_errors
++;
4048 if (val
& BTXMAC_STATUS_MAX_PKT_ERR
)
4049 mp
->tx_max_pkt_size_errors
++;
4050 if (val
& BTXMAC_STATUS_BYTE_CNT_EXP
)
4051 mp
->tx_bytes
+= BTXMAC_BYTE_CNT_COUNT
;
4052 if (val
& BTXMAC_STATUS_FRAME_CNT_EXP
)
4053 mp
->tx_frames
+= BTXMAC_FRM_CNT_COUNT
;
4055 val
= nr64_mac(BRXMAC_STATUS
);
4056 if (val
& BRXMAC_STATUS_OVERFLOW
)
4058 if (val
& BRXMAC_STATUS_FRAME_CNT_EXP
)
4059 mp
->rx_frames
+= BRXMAC_FRAME_CNT_COUNT
;
4060 if (val
& BRXMAC_STATUS_ALIGN_ERR_EXP
)
4061 mp
->rx_align_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
4062 if (val
& BRXMAC_STATUS_CRC_ERR_EXP
)
4063 mp
->rx_crc_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
4064 if (val
& BRXMAC_STATUS_LEN_ERR_EXP
)
4065 mp
->rx_len_errors
+= BRXMAC_CODE_VIOL_ERR_CNT_COUNT
;
4067 val
= nr64_mac(BMAC_CTRL_STATUS
);
4068 if (val
& BMAC_CTRL_STATUS_NOPAUSE
)
4069 mp
->pause_off_state
++;
4070 if (val
& BMAC_CTRL_STATUS_PAUSE
)
4071 mp
->pause_on_state
++;
4072 if (val
& BMAC_CTRL_STATUS_PAUSE_RECV
)
4073 mp
->pause_received
++;
4076 static int niu_mac_interrupt(struct niu
*np
)
4078 if (np
->flags
& NIU_FLAGS_XMAC
)
4079 niu_xmac_interrupt(np
);
4081 niu_bmac_interrupt(np
);
4086 static void niu_log_device_error(struct niu
*np
, u64 stat
)
4088 dev_err(np
->device
, PFX
"%s: Core device errors ( ",
4091 if (stat
& SYS_ERR_MASK_META2
)
4093 if (stat
& SYS_ERR_MASK_META1
)
4095 if (stat
& SYS_ERR_MASK_PEU
)
4097 if (stat
& SYS_ERR_MASK_TXC
)
4099 if (stat
& SYS_ERR_MASK_RDMC
)
4101 if (stat
& SYS_ERR_MASK_TDMC
)
4103 if (stat
& SYS_ERR_MASK_ZCP
)
4105 if (stat
& SYS_ERR_MASK_FFLP
)
4107 if (stat
& SYS_ERR_MASK_IPP
)
4109 if (stat
& SYS_ERR_MASK_MAC
)
4111 if (stat
& SYS_ERR_MASK_SMX
)
4117 static int niu_device_error(struct niu
*np
)
4119 u64 stat
= nr64(SYS_ERR_STAT
);
4121 dev_err(np
->device
, PFX
"%s: Core device error, stat[%llx]\n",
4122 np
->dev
->name
, (unsigned long long) stat
);
4124 niu_log_device_error(np
, stat
);
4129 static int niu_slowpath_interrupt(struct niu
*np
, struct niu_ldg
*lp
,
4130 u64 v0
, u64 v1
, u64 v2
)
4139 if (v1
& 0x00000000ffffffffULL
) {
4140 u32 rx_vec
= (v1
& 0xffffffff);
4142 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4143 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4145 if (rx_vec
& (1 << rp
->rx_channel
)) {
4146 int r
= niu_rx_error(np
, rp
);
4151 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
4152 RX_DMA_CTL_STAT_MEX
);
4157 if (v1
& 0x7fffffff00000000ULL
) {
4158 u32 tx_vec
= (v1
>> 32) & 0x7fffffff;
4160 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4161 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4163 if (tx_vec
& (1 << rp
->tx_channel
)) {
4164 int r
= niu_tx_error(np
, rp
);
4170 if ((v0
| v1
) & 0x8000000000000000ULL
) {
4171 int r
= niu_mif_interrupt(np
);
4177 int r
= niu_mac_interrupt(np
);
4182 int r
= niu_device_error(np
);
4189 niu_enable_interrupts(np
, 0);
4194 static void niu_rxchan_intr(struct niu
*np
, struct rx_ring_info
*rp
,
4197 struct rxdma_mailbox
*mbox
= rp
->mbox
;
4198 u64 stat_write
, stat
= le64_to_cpup(&mbox
->rx_dma_ctl_stat
);
4200 stat_write
= (RX_DMA_CTL_STAT_RCRTHRES
|
4201 RX_DMA_CTL_STAT_RCRTO
);
4202 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
), stat_write
);
4204 niudbg(INTR
, "%s: rxchan_intr stat[%llx]\n",
4205 np
->dev
->name
, (unsigned long long) stat
);
4208 static void niu_txchan_intr(struct niu
*np
, struct tx_ring_info
*rp
,
4211 rp
->tx_cs
= nr64(TX_CS(rp
->tx_channel
));
4213 niudbg(INTR
, "%s: txchan_intr cs[%llx]\n",
4214 np
->dev
->name
, (unsigned long long) rp
->tx_cs
);
4217 static void __niu_fastpath_interrupt(struct niu
*np
, int ldg
, u64 v0
)
4219 struct niu_parent
*parent
= np
->parent
;
4223 tx_vec
= (v0
>> 32);
4224 rx_vec
= (v0
& 0xffffffff);
4226 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4227 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4228 int ldn
= LDN_RXDMA(rp
->rx_channel
);
4230 if (parent
->ldg_map
[ldn
] != ldg
)
4233 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
4234 if (rx_vec
& (1 << rp
->rx_channel
))
4235 niu_rxchan_intr(np
, rp
, ldn
);
4238 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4239 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4240 int ldn
= LDN_TXDMA(rp
->tx_channel
);
4242 if (parent
->ldg_map
[ldn
] != ldg
)
4245 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
4246 if (tx_vec
& (1 << rp
->tx_channel
))
4247 niu_txchan_intr(np
, rp
, ldn
);
4251 static void niu_schedule_napi(struct niu
*np
, struct niu_ldg
*lp
,
4252 u64 v0
, u64 v1
, u64 v2
)
4254 if (likely(napi_schedule_prep(&lp
->napi
))) {
4258 __niu_fastpath_interrupt(np
, lp
->ldg_num
, v0
);
4259 __napi_schedule(&lp
->napi
);
4263 static irqreturn_t
niu_interrupt(int irq
, void *dev_id
)
4265 struct niu_ldg
*lp
= dev_id
;
4266 struct niu
*np
= lp
->np
;
4267 int ldg
= lp
->ldg_num
;
4268 unsigned long flags
;
4271 if (netif_msg_intr(np
))
4272 printk(KERN_DEBUG PFX
"niu_interrupt() ldg[%p](%d) ",
4275 spin_lock_irqsave(&np
->lock
, flags
);
4277 v0
= nr64(LDSV0(ldg
));
4278 v1
= nr64(LDSV1(ldg
));
4279 v2
= nr64(LDSV2(ldg
));
4281 if (netif_msg_intr(np
))
4282 printk("v0[%llx] v1[%llx] v2[%llx]\n",
4283 (unsigned long long) v0
,
4284 (unsigned long long) v1
,
4285 (unsigned long long) v2
);
4287 if (unlikely(!v0
&& !v1
&& !v2
)) {
4288 spin_unlock_irqrestore(&np
->lock
, flags
);
4292 if (unlikely((v0
& ((u64
)1 << LDN_MIF
)) || v1
|| v2
)) {
4293 int err
= niu_slowpath_interrupt(np
, lp
, v0
, v1
, v2
);
4297 if (likely(v0
& ~((u64
)1 << LDN_MIF
)))
4298 niu_schedule_napi(np
, lp
, v0
, v1
, v2
);
4300 niu_ldg_rearm(np
, lp
, 1);
4302 spin_unlock_irqrestore(&np
->lock
, flags
);
4307 static void niu_free_rx_ring_info(struct niu
*np
, struct rx_ring_info
*rp
)
4310 np
->ops
->free_coherent(np
->device
,
4311 sizeof(struct rxdma_mailbox
),
4312 rp
->mbox
, rp
->mbox_dma
);
4316 np
->ops
->free_coherent(np
->device
,
4317 MAX_RCR_RING_SIZE
* sizeof(__le64
),
4318 rp
->rcr
, rp
->rcr_dma
);
4320 rp
->rcr_table_size
= 0;
4324 niu_rbr_free(np
, rp
);
4326 np
->ops
->free_coherent(np
->device
,
4327 MAX_RBR_RING_SIZE
* sizeof(__le32
),
4328 rp
->rbr
, rp
->rbr_dma
);
4330 rp
->rbr_table_size
= 0;
4337 static void niu_free_tx_ring_info(struct niu
*np
, struct tx_ring_info
*rp
)
4340 np
->ops
->free_coherent(np
->device
,
4341 sizeof(struct txdma_mailbox
),
4342 rp
->mbox
, rp
->mbox_dma
);
4348 for (i
= 0; i
< MAX_TX_RING_SIZE
; i
++) {
4349 if (rp
->tx_buffs
[i
].skb
)
4350 (void) release_tx_packet(np
, rp
, i
);
4353 np
->ops
->free_coherent(np
->device
,
4354 MAX_TX_RING_SIZE
* sizeof(__le64
),
4355 rp
->descr
, rp
->descr_dma
);
4364 static void niu_free_channels(struct niu
*np
)
4369 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4370 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4372 niu_free_rx_ring_info(np
, rp
);
4374 kfree(np
->rx_rings
);
4375 np
->rx_rings
= NULL
;
4376 np
->num_rx_rings
= 0;
4380 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4381 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4383 niu_free_tx_ring_info(np
, rp
);
4385 kfree(np
->tx_rings
);
4386 np
->tx_rings
= NULL
;
4387 np
->num_tx_rings
= 0;
4391 static int niu_alloc_rx_ring_info(struct niu
*np
,
4392 struct rx_ring_info
*rp
)
4394 BUILD_BUG_ON(sizeof(struct rxdma_mailbox
) != 64);
4396 rp
->rxhash
= kzalloc(MAX_RBR_RING_SIZE
* sizeof(struct page
*),
4401 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
4402 sizeof(struct rxdma_mailbox
),
4403 &rp
->mbox_dma
, GFP_KERNEL
);
4406 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
4407 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
4408 "RXDMA mailbox %p\n", np
->dev
->name
, rp
->mbox
);
4412 rp
->rcr
= np
->ops
->alloc_coherent(np
->device
,
4413 MAX_RCR_RING_SIZE
* sizeof(__le64
),
4414 &rp
->rcr_dma
, GFP_KERNEL
);
4417 if ((unsigned long)rp
->rcr
& (64UL - 1)) {
4418 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
4419 "RXDMA RCR table %p\n", np
->dev
->name
, rp
->rcr
);
4422 rp
->rcr_table_size
= MAX_RCR_RING_SIZE
;
4425 rp
->rbr
= np
->ops
->alloc_coherent(np
->device
,
4426 MAX_RBR_RING_SIZE
* sizeof(__le32
),
4427 &rp
->rbr_dma
, GFP_KERNEL
);
4430 if ((unsigned long)rp
->rbr
& (64UL - 1)) {
4431 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
4432 "RXDMA RBR table %p\n", np
->dev
->name
, rp
->rbr
);
4435 rp
->rbr_table_size
= MAX_RBR_RING_SIZE
;
4437 rp
->rbr_pending
= 0;
4442 static void niu_set_max_burst(struct niu
*np
, struct tx_ring_info
*rp
)
4444 int mtu
= np
->dev
->mtu
;
4446 /* These values are recommended by the HW designers for fair
4447 * utilization of DRR amongst the rings.
4449 rp
->max_burst
= mtu
+ 32;
4450 if (rp
->max_burst
> 4096)
4451 rp
->max_burst
= 4096;
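/* Illustrative sketch, not part of the driver: the DRR burst limit set
 * above is simply MTU + 32, capped at 4096 bytes, so a standard
 * 1500-byte MTU gives max_burst = 1532 while a 9000-byte jumbo MTU is
 * clamped to 4096.
 */
#if 0	/* example only */
static int example_max_burst(int mtu)
{
	int burst = mtu + 32;

	return burst > 4096 ? 4096 : burst;
}
#endif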
4454 static int niu_alloc_tx_ring_info(struct niu
*np
,
4455 struct tx_ring_info
*rp
)
4457 BUILD_BUG_ON(sizeof(struct txdma_mailbox
) != 64);
4459 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
4460 sizeof(struct txdma_mailbox
),
4461 &rp
->mbox_dma
, GFP_KERNEL
);
4464 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
4465 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
4466 "TXDMA mailbox %p\n", np
->dev
->name
, rp
->mbox
);
4470 rp
->descr
= np
->ops
->alloc_coherent(np
->device
,
4471 MAX_TX_RING_SIZE
* sizeof(__le64
),
4472 &rp
->descr_dma
, GFP_KERNEL
);
4475 if ((unsigned long)rp
->descr
& (64UL - 1)) {
4476 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
4477 "TXDMA descr table %p\n", np
->dev
->name
, rp
->descr
);
4481 rp
->pending
= MAX_TX_RING_SIZE
;
4486 /* XXX make these configurable... XXX */
4487 rp
->mark_freq
= rp
->pending
/ 4;
4489 niu_set_max_burst(np
, rp
);
4494 static void niu_size_rbr(struct niu
*np
, struct rx_ring_info
*rp
)
4498 bss
= min(PAGE_SHIFT
, 15);
4500 rp
->rbr_block_size
= 1 << bss
;
4501 rp
->rbr_blocks_per_page
= 1 << (PAGE_SHIFT
-bss
);
4503 rp
->rbr_sizes
[0] = 256;
4504 rp
->rbr_sizes
[1] = 1024;
4505 if (np
->dev
->mtu
> ETH_DATA_LEN
) {
4506 switch (PAGE_SIZE
) {
4508 rp
->rbr_sizes
[2] = 4096;
4512 rp
->rbr_sizes
[2] = 8192;
4516 rp
->rbr_sizes
[2] = 2048;
4518 rp
->rbr_sizes
[3] = rp
->rbr_block_size
;
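/* Illustrative sketch, not part of the driver: niu_size_rbr() above
 * derives the block geometry from the page size.  With bss clamped to
 * min(PAGE_SHIFT, 15), a 4K page gives rbr_block_size = 4096 and one
 * block per page, while a hypothetical 64K page gives a 32K block size
 * and two blocks per page; rbr_sizes[] then lists the per-packet buffer
 * sizes (256, 1024, an MTU-dependent middle size, and the full block).
 */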
4521 static int niu_alloc_channels(struct niu
*np
)
4523 struct niu_parent
*parent
= np
->parent
;
4524 int first_rx_channel
, first_tx_channel
;
4528 first_rx_channel
= first_tx_channel
= 0;
4529 for (i
= 0; i
< port
; i
++) {
4530 first_rx_channel
+= parent
->rxchan_per_port
[i
];
4531 first_tx_channel
+= parent
->txchan_per_port
[i
];
4534 np
->num_rx_rings
= parent
->rxchan_per_port
[port
];
4535 np
->num_tx_rings
= parent
->txchan_per_port
[port
];
4537 np
->dev
->real_num_tx_queues
= np
->num_tx_rings
;
4539 np
->rx_rings
= kzalloc(np
->num_rx_rings
* sizeof(struct rx_ring_info
),
4545 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4546 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4549 rp
->rx_channel
= first_rx_channel
+ i
;
4551 err
= niu_alloc_rx_ring_info(np
, rp
);
4555 niu_size_rbr(np
, rp
);
4557 /* XXX better defaults, configurable, etc... XXX */
4558 rp
->nonsyn_window
= 64;
4559 rp
->nonsyn_threshold
= rp
->rcr_table_size
- 64;
4560 rp
->syn_window
= 64;
4561 rp
->syn_threshold
= rp
->rcr_table_size
- 64;
4562 rp
->rcr_pkt_threshold
= 16;
4563 rp
->rcr_timeout
= 8;
4564 rp
->rbr_kick_thresh
= RBR_REFILL_MIN
;
4565 if (rp
->rbr_kick_thresh
< rp
->rbr_blocks_per_page
)
4566 rp
->rbr_kick_thresh
= rp
->rbr_blocks_per_page
;
4568 err
= niu_rbr_fill(np
, rp
, GFP_KERNEL
);
4573 np
->tx_rings
= kzalloc(np
->num_tx_rings
* sizeof(struct tx_ring_info
),
4579 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4580 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4583 rp
->tx_channel
= first_tx_channel
+ i
;
4585 err
= niu_alloc_tx_ring_info(np
, rp
);
4593 niu_free_channels(np
);
static int niu_tx_cs_sng_poll(struct niu *np, int channel)
{
	int limit = 1000;

	while (--limit > 0) {
		u64 val = nr64(TX_CS(channel));
		if (val & TX_CS_SNG_STATE)
			return 0;
	}
	return -ENODEV;
}

static int niu_tx_channel_stop(struct niu *np, int channel)
{
	u64 val = nr64(TX_CS(channel));

	val |= TX_CS_STOP_N_GO;
	nw64(TX_CS(channel), val);

	return niu_tx_cs_sng_poll(np, channel);
}

static int niu_tx_cs_reset_poll(struct niu *np, int channel)
{
	int limit = 1000;

	while (--limit > 0) {
		u64 val = nr64(TX_CS(channel));
		if (!(val & TX_CS_RST))
			return 0;
	}
	return -ENODEV;
}

static int niu_tx_channel_reset(struct niu *np, int channel)
{
	u64 val = nr64(TX_CS(channel));
	int err;

	val |= TX_CS_RST;
	nw64(TX_CS(channel), val);

	err = niu_tx_cs_reset_poll(np, channel);
	if (!err)
		nw64(TX_RING_KICK(channel), 0);

	return err;
}

static int niu_tx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(TX_LOG_MASK1(channel), 0);
	nw64(TX_LOG_VAL1(channel), 0);
	nw64(TX_LOG_MASK2(channel), 0);
	nw64(TX_LOG_VAL2(channel), 0);
	nw64(TX_LOG_PAGE_RELO1(channel), 0);
	nw64(TX_LOG_PAGE_RELO2(channel), 0);
	nw64(TX_LOG_PAGE_HDL(channel), 0);

	val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
	nw64(TX_LOG_PAGE_VLD(channel), val);

	/* XXX TXDMA 32bit mode? XXX */

	return 0;
}
4667 static void niu_txc_enable_port(struct niu
*np
, int on
)
4669 unsigned long flags
;
4672 niu_lock_parent(np
, flags
);
4673 val
= nr64(TXC_CONTROL
);
4674 mask
= (u64
)1 << np
->port
;
4676 val
|= TXC_CONTROL_ENABLE
| mask
;
4679 if ((val
& ~TXC_CONTROL_ENABLE
) == 0)
4680 val
&= ~TXC_CONTROL_ENABLE
;
4682 nw64(TXC_CONTROL
, val
);
4683 niu_unlock_parent(np
, flags
);
4686 static void niu_txc_set_imask(struct niu
*np
, u64 imask
)
4688 unsigned long flags
;
4691 niu_lock_parent(np
, flags
);
4692 val
= nr64(TXC_INT_MASK
);
4693 val
&= ~TXC_INT_MASK_VAL(np
->port
);
4694 val
|= (imask
<< TXC_INT_MASK_VAL_SHIFT(np
->port
));
4695 niu_unlock_parent(np
, flags
);
4698 static void niu_txc_port_dma_enable(struct niu
*np
, int on
)
4705 for (i
= 0; i
< np
->num_tx_rings
; i
++)
4706 val
|= (1 << np
->tx_rings
[i
].tx_channel
);
4708 nw64(TXC_PORT_DMA(np
->port
), val
);
4711 static int niu_init_one_tx_channel(struct niu
*np
, struct tx_ring_info
*rp
)
4713 int err
, channel
= rp
->tx_channel
;
4716 err
= niu_tx_channel_stop(np
, channel
);
4720 err
= niu_tx_channel_reset(np
, channel
);
4724 err
= niu_tx_channel_lpage_init(np
, channel
);
4728 nw64(TXC_DMA_MAX(channel
), rp
->max_burst
);
4729 nw64(TX_ENT_MSK(channel
), 0);
4731 if (rp
->descr_dma
& ~(TX_RNG_CFIG_STADDR_BASE
|
4732 TX_RNG_CFIG_STADDR
)) {
4733 dev_err(np
->device
, PFX
"%s: TX ring channel %d "
4734 "DMA addr (%llx) is not aligned.\n",
4735 np
->dev
->name
, channel
,
4736 (unsigned long long) rp
->descr_dma
);
4740 /* The length field in TX_RNG_CFIG is measured in 64-byte
4741 * blocks. rp->pending is the number of TX descriptors in
4742 * our ring, 8 bytes each, thus we divide by 8 bytes more
4743 * to get the proper value the chip wants.
4745 ring_len
= (rp
->pending
/ 8);
4747 val
= ((ring_len
<< TX_RNG_CFIG_LEN_SHIFT
) |
4749 nw64(TX_RNG_CFIG(channel
), val
);
4751 if (((rp
->mbox_dma
>> 32) & ~TXDMA_MBH_MBADDR
) ||
4752 ((u32
)rp
->mbox_dma
& ~TXDMA_MBL_MBADDR
)) {
4753 dev_err(np
->device
, PFX
"%s: TX ring channel %d "
4754 "MBOX addr (%llx) is has illegal bits.\n",
4755 np
->dev
->name
, channel
,
4756 (unsigned long long) rp
->mbox_dma
);
4759 nw64(TXDMA_MBH(channel
), rp
->mbox_dma
>> 32);
4760 nw64(TXDMA_MBL(channel
), rp
->mbox_dma
& TXDMA_MBL_MBADDR
);
4762 nw64(TX_CS(channel
), 0);
4764 rp
->last_pkt_cnt
= 0;
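/* Illustrative sketch, not part of the driver: the TX_RNG_CFIG length
 * field programmed above counts 64-byte blocks.  Since each descriptor
 * is 8 bytes, ring_len = pending / 8; e.g. a hypothetical ring of 256
 * descriptors occupies 2048 bytes and is described to the chip as 32
 * blocks.
 */
#if 0	/* example only */
static u64 example_tx_rng_len(unsigned int pending_descriptors)
{
	/* 8-byte descriptors expressed in 64-byte blocks */
	return pending_descriptors / 8;
}
#endif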
4769 static void niu_init_rdc_groups(struct niu
*np
)
4771 struct niu_rdc_tables
*tp
= &np
->parent
->rdc_group_cfg
[np
->port
];
4772 int i
, first_table_num
= tp
->first_table_num
;
4774 for (i
= 0; i
< tp
->num_tables
; i
++) {
4775 struct rdc_table
*tbl
= &tp
->tables
[i
];
4776 int this_table
= first_table_num
+ i
;
4779 for (slot
= 0; slot
< NIU_RDC_TABLE_SLOTS
; slot
++)
4780 nw64(RDC_TBL(this_table
, slot
),
4781 tbl
->rxdma_channel
[slot
]);
4784 nw64(DEF_RDC(np
->port
), np
->parent
->rdc_default
[np
->port
]);
4787 static void niu_init_drr_weight(struct niu
*np
)
4789 int type
= phy_decode(np
->parent
->port_phy
, np
->port
);
4794 val
= PT_DRR_WEIGHT_DEFAULT_10G
;
4799 val
= PT_DRR_WEIGHT_DEFAULT_1G
;
4802 nw64(PT_DRR_WT(np
->port
), val
);
4805 static int niu_init_hostinfo(struct niu
*np
)
4807 struct niu_parent
*parent
= np
->parent
;
4808 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[np
->port
];
4809 int i
, err
, num_alt
= niu_num_alt_addr(np
);
4810 int first_rdc_table
= tp
->first_table_num
;
4812 err
= niu_set_primary_mac_rdc_table(np
, first_rdc_table
, 1);
4816 err
= niu_set_multicast_mac_rdc_table(np
, first_rdc_table
, 1);
4820 for (i
= 0; i
< num_alt
; i
++) {
4821 err
= niu_set_alt_mac_rdc_table(np
, i
, first_rdc_table
, 1);
static int niu_rx_channel_reset(struct niu *np, int channel)
{
	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
				      RXDMA_CFIG1_RST, 1000, 10,
				      "RXDMA_CFIG1");
}

static int niu_rx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(RX_LOG_MASK1(channel), 0);
	nw64(RX_LOG_VAL1(channel), 0);
	nw64(RX_LOG_MASK2(channel), 0);
	nw64(RX_LOG_VAL2(channel), 0);
	nw64(RX_LOG_PAGE_RELO1(channel), 0);
	nw64(RX_LOG_PAGE_RELO2(channel), 0);
	nw64(RX_LOG_PAGE_HDL(channel), 0);

	val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
	nw64(RX_LOG_PAGE_VLD(channel), val);

	return 0;
}

static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
{
	u64 val;

	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
	nw64(RDC_RED_PARA(rp->rx_channel), val);
}
4866 static int niu_compute_rbr_cfig_b(struct rx_ring_info
*rp
, u64
*ret
)
4871 switch (rp
->rbr_block_size
) {
4873 val
|= (RBR_BLKSIZE_4K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4876 val
|= (RBR_BLKSIZE_8K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4879 val
|= (RBR_BLKSIZE_16K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4882 val
|= (RBR_BLKSIZE_32K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4887 val
|= RBR_CFIG_B_VLD2
;
4888 switch (rp
->rbr_sizes
[2]) {
4890 val
|= (RBR_BUFSZ2_2K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4893 val
|= (RBR_BUFSZ2_4K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4896 val
|= (RBR_BUFSZ2_8K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4899 val
|= (RBR_BUFSZ2_16K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4905 val
|= RBR_CFIG_B_VLD1
;
4906 switch (rp
->rbr_sizes
[1]) {
4908 val
|= (RBR_BUFSZ1_1K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4911 val
|= (RBR_BUFSZ1_2K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4914 val
|= (RBR_BUFSZ1_4K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4917 val
|= (RBR_BUFSZ1_8K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4923 val
|= RBR_CFIG_B_VLD0
;
4924 switch (rp
->rbr_sizes
[0]) {
4926 val
|= (RBR_BUFSZ0_256
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4929 val
|= (RBR_BUFSZ0_512
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4932 val
|= (RBR_BUFSZ0_1K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4935 val
|= (RBR_BUFSZ0_2K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4946 static int niu_enable_rx_channel(struct niu
*np
, int channel
, int on
)
4948 u64 val
= nr64(RXDMA_CFIG1(channel
));
4952 val
|= RXDMA_CFIG1_EN
;
4954 val
&= ~RXDMA_CFIG1_EN
;
4955 nw64(RXDMA_CFIG1(channel
), val
);
4958 while (--limit
> 0) {
4959 if (nr64(RXDMA_CFIG1(channel
)) & RXDMA_CFIG1_QST
)
4968 static int niu_init_one_rx_channel(struct niu
*np
, struct rx_ring_info
*rp
)
4970 int err
, channel
= rp
->rx_channel
;
4973 err
= niu_rx_channel_reset(np
, channel
);
4977 err
= niu_rx_channel_lpage_init(np
, channel
);
4981 niu_rx_channel_wred_init(np
, rp
);
4983 nw64(RX_DMA_ENT_MSK(channel
), RX_DMA_ENT_MSK_RBR_EMPTY
);
4984 nw64(RX_DMA_CTL_STAT(channel
),
4985 (RX_DMA_CTL_STAT_MEX
|
4986 RX_DMA_CTL_STAT_RCRTHRES
|
4987 RX_DMA_CTL_STAT_RCRTO
|
4988 RX_DMA_CTL_STAT_RBR_EMPTY
));
4989 nw64(RXDMA_CFIG1(channel
), rp
->mbox_dma
>> 32);
4990 nw64(RXDMA_CFIG2(channel
), (rp
->mbox_dma
& 0x00000000ffffffc0));
4991 nw64(RBR_CFIG_A(channel
),
4992 ((u64
)rp
->rbr_table_size
<< RBR_CFIG_A_LEN_SHIFT
) |
4993 (rp
->rbr_dma
& (RBR_CFIG_A_STADDR_BASE
| RBR_CFIG_A_STADDR
)));
4994 err
= niu_compute_rbr_cfig_b(rp
, &val
);
4997 nw64(RBR_CFIG_B(channel
), val
);
4998 nw64(RCRCFIG_A(channel
),
4999 ((u64
)rp
->rcr_table_size
<< RCRCFIG_A_LEN_SHIFT
) |
5000 (rp
->rcr_dma
& (RCRCFIG_A_STADDR_BASE
| RCRCFIG_A_STADDR
)));
5001 nw64(RCRCFIG_B(channel
),
5002 ((u64
)rp
->rcr_pkt_threshold
<< RCRCFIG_B_PTHRES_SHIFT
) |
5004 ((u64
)rp
->rcr_timeout
<< RCRCFIG_B_TIMEOUT_SHIFT
));
5006 err
= niu_enable_rx_channel(np
, channel
, 1);
5010 nw64(RBR_KICK(channel
), rp
->rbr_index
);
5012 val
= nr64(RX_DMA_CTL_STAT(channel
));
5013 val
|= RX_DMA_CTL_STAT_RBR_EMPTY
;
5014 nw64(RX_DMA_CTL_STAT(channel
), val
);
5019 static int niu_init_rx_channels(struct niu
*np
)
5021 unsigned long flags
;
5022 u64 seed
= jiffies_64
;
5025 niu_lock_parent(np
, flags
);
5026 nw64(RX_DMA_CK_DIV
, np
->parent
->rxdma_clock_divider
);
5027 nw64(RED_RAN_INIT
, RED_RAN_INIT_OPMODE
| (seed
& RED_RAN_INIT_VAL
));
5028 niu_unlock_parent(np
, flags
);
5030 /* XXX RXDMA 32bit mode? XXX */
5032 niu_init_rdc_groups(np
);
5033 niu_init_drr_weight(np
);
5035 err
= niu_init_hostinfo(np
);
5039 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
5040 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
5042 err
= niu_init_one_rx_channel(np
, rp
);
5050 static int niu_set_ip_frag_rule(struct niu
*np
)
5052 struct niu_parent
*parent
= np
->parent
;
5053 struct niu_classifier
*cp
= &np
->clas
;
5054 struct niu_tcam_entry
*tp
;
5057 index
= cp
->tcam_top
;
5058 tp
= &parent
->tcam
[index
];
5060 /* Note that the noport bit is the same in both ipv4 and
5061 * ipv6 format TCAM entries.
5063 memset(tp
, 0, sizeof(*tp
));
5064 tp
->key
[1] = TCAM_V4KEY1_NOPORT
;
5065 tp
->key_mask
[1] = TCAM_V4KEY1_NOPORT
;
5066 tp
->assoc_data
= (TCAM_ASSOCDATA_TRES_USE_OFFSET
|
5067 ((u64
)0 << TCAM_ASSOCDATA_OFFSET_SHIFT
));
5068 err
= tcam_write(np
, index
, tp
->key
, tp
->key_mask
);
5071 err
= tcam_assoc_write(np
, index
, tp
->assoc_data
);
5075 cp
->tcam_valid_entries
++;
5080 static int niu_init_classifier_hw(struct niu
*np
)
5082 struct niu_parent
*parent
= np
->parent
;
5083 struct niu_classifier
*cp
= &np
->clas
;
5086 nw64(H1POLY
, cp
->h1_init
);
5087 nw64(H2POLY
, cp
->h2_init
);
5089 err
= niu_init_hostinfo(np
);
5093 for (i
= 0; i
< ENET_VLAN_TBL_NUM_ENTRIES
; i
++) {
5094 struct niu_vlan_rdc
*vp
= &cp
->vlan_mappings
[i
];
5096 vlan_tbl_write(np
, i
, np
->port
,
5097 vp
->vlan_pref
, vp
->rdc_num
);
5100 for (i
= 0; i
< cp
->num_alt_mac_mappings
; i
++) {
5101 struct niu_altmac_rdc
*ap
= &cp
->alt_mac_mappings
[i
];
5103 err
= niu_set_alt_mac_rdc_table(np
, ap
->alt_mac_num
,
5104 ap
->rdc_num
, ap
->mac_pref
);
5109 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_SCTP_IPV6
; i
++) {
5110 int index
= i
- CLASS_CODE_USER_PROG1
;
5112 err
= niu_set_tcam_key(np
, i
, parent
->tcam_key
[index
]);
5115 err
= niu_set_flow_key(np
, i
, parent
->flow_key
[index
]);
5120 err
= niu_set_ip_frag_rule(np
);
5129 static int niu_zcp_write(struct niu
*np
, int index
, u64
*data
)
5131 nw64(ZCP_RAM_DATA0
, data
[0]);
5132 nw64(ZCP_RAM_DATA1
, data
[1]);
5133 nw64(ZCP_RAM_DATA2
, data
[2]);
5134 nw64(ZCP_RAM_DATA3
, data
[3]);
5135 nw64(ZCP_RAM_DATA4
, data
[4]);
5136 nw64(ZCP_RAM_BE
, ZCP_RAM_BE_VAL
);
5138 (ZCP_RAM_ACC_WRITE
|
5139 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
5140 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
5142 return niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
5146 static int niu_zcp_read(struct niu
*np
, int index
, u64
*data
)
5150 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
5153 dev_err(np
->device
, PFX
"%s: ZCP read busy won't clear, "
5154 "ZCP_RAM_ACC[%llx]\n", np
->dev
->name
,
5155 (unsigned long long) nr64(ZCP_RAM_ACC
));
5161 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
5162 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
5164 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
5167 dev_err(np
->device
, PFX
"%s: ZCP read busy2 won't clear, "
5168 "ZCP_RAM_ACC[%llx]\n", np
->dev
->name
,
5169 (unsigned long long) nr64(ZCP_RAM_ACC
));
5173 data
[0] = nr64(ZCP_RAM_DATA0
);
5174 data
[1] = nr64(ZCP_RAM_DATA1
);
5175 data
[2] = nr64(ZCP_RAM_DATA2
);
5176 data
[3] = nr64(ZCP_RAM_DATA3
);
5177 data
[4] = nr64(ZCP_RAM_DATA4
);
5182 static void niu_zcp_cfifo_reset(struct niu
*np
)
5184 u64 val
= nr64(RESET_CFIFO
);
5186 val
|= RESET_CFIFO_RST(np
->port
);
5187 nw64(RESET_CFIFO
, val
);
5190 val
&= ~RESET_CFIFO_RST(np
->port
);
5191 nw64(RESET_CFIFO
, val
);
5194 static int niu_init_zcp(struct niu
*np
)
5196 u64 data
[5], rbuf
[5];
5199 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
5200 if (np
->port
== 0 || np
->port
== 1)
5201 max
= ATLAS_P0_P1_CFIFO_ENTRIES
;
5203 max
= ATLAS_P2_P3_CFIFO_ENTRIES
;
5205 max
= NIU_CFIFO_ENTRIES
;
5213 for (i
= 0; i
< max
; i
++) {
5214 err
= niu_zcp_write(np
, i
, data
);
5217 err
= niu_zcp_read(np
, i
, rbuf
);
5222 niu_zcp_cfifo_reset(np
);
5223 nw64(CFIFO_ECC(np
->port
), 0);
5224 nw64(ZCP_INT_STAT
, ZCP_INT_STAT_ALL
);
5225 (void) nr64(ZCP_INT_STAT
);
5226 nw64(ZCP_INT_MASK
, ZCP_INT_MASK_ALL
);
5231 static void niu_ipp_write(struct niu
*np
, int index
, u64
*data
)
5233 u64 val
= nr64_ipp(IPP_CFIG
);
5235 nw64_ipp(IPP_CFIG
, val
| IPP_CFIG_DFIFO_PIO_W
);
5236 nw64_ipp(IPP_DFIFO_WR_PTR
, index
);
5237 nw64_ipp(IPP_DFIFO_WR0
, data
[0]);
5238 nw64_ipp(IPP_DFIFO_WR1
, data
[1]);
5239 nw64_ipp(IPP_DFIFO_WR2
, data
[2]);
5240 nw64_ipp(IPP_DFIFO_WR3
, data
[3]);
5241 nw64_ipp(IPP_DFIFO_WR4
, data
[4]);
5242 nw64_ipp(IPP_CFIG
, val
& ~IPP_CFIG_DFIFO_PIO_W
);
5245 static void niu_ipp_read(struct niu
*np
, int index
, u64
*data
)
5247 nw64_ipp(IPP_DFIFO_RD_PTR
, index
);
5248 data
[0] = nr64_ipp(IPP_DFIFO_RD0
);
5249 data
[1] = nr64_ipp(IPP_DFIFO_RD1
);
5250 data
[2] = nr64_ipp(IPP_DFIFO_RD2
);
5251 data
[3] = nr64_ipp(IPP_DFIFO_RD3
);
5252 data
[4] = nr64_ipp(IPP_DFIFO_RD4
);
5255 static int niu_ipp_reset(struct niu
*np
)
5257 return niu_set_and_wait_clear_ipp(np
, IPP_CFIG
, IPP_CFIG_SOFT_RST
,
5258 1000, 100, "IPP_CFIG");
5261 static int niu_init_ipp(struct niu
*np
)
5263 u64 data
[5], rbuf
[5], val
;
5266 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
5267 if (np
->port
== 0 || np
->port
== 1)
5268 max
= ATLAS_P0_P1_DFIFO_ENTRIES
;
5270 max
= ATLAS_P2_P3_DFIFO_ENTRIES
;
5272 max
= NIU_DFIFO_ENTRIES
;
5280 for (i
= 0; i
< max
; i
++) {
5281 niu_ipp_write(np
, i
, data
);
5282 niu_ipp_read(np
, i
, rbuf
);
5285 (void) nr64_ipp(IPP_INT_STAT
);
5286 (void) nr64_ipp(IPP_INT_STAT
);
5288 err
= niu_ipp_reset(np
);
5292 (void) nr64_ipp(IPP_PKT_DIS
);
5293 (void) nr64_ipp(IPP_BAD_CS_CNT
);
5294 (void) nr64_ipp(IPP_ECC
);
5296 (void) nr64_ipp(IPP_INT_STAT
);
5298 nw64_ipp(IPP_MSK
, ~IPP_MSK_ALL
);
5300 val
= nr64_ipp(IPP_CFIG
);
5301 val
&= ~IPP_CFIG_IP_MAX_PKT
;
5302 val
|= (IPP_CFIG_IPP_ENABLE
|
5303 IPP_CFIG_DFIFO_ECC_EN
|
5304 IPP_CFIG_DROP_BAD_CRC
|
5306 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT
));
5307 nw64_ipp(IPP_CFIG
, val
);
5312 static void niu_handle_led(struct niu
*np
, int status
)
5315 val
= nr64_mac(XMAC_CONFIG
);
5317 if ((np
->flags
& NIU_FLAGS_10G
) != 0 &&
5318 (np
->flags
& NIU_FLAGS_FIBER
) != 0) {
5320 val
|= XMAC_CONFIG_LED_POLARITY
;
5321 val
&= ~XMAC_CONFIG_FORCE_LED_ON
;
5323 val
|= XMAC_CONFIG_FORCE_LED_ON
;
5324 val
&= ~XMAC_CONFIG_LED_POLARITY
;
5328 nw64_mac(XMAC_CONFIG
, val
);
5331 static void niu_init_xif_xmac(struct niu
*np
)
5333 struct niu_link_config
*lp
= &np
->link_config
;
5336 if (np
->flags
& NIU_FLAGS_XCVR_SERDES
) {
5337 val
= nr64(MIF_CONFIG
);
5338 val
|= MIF_CONFIG_ATCA_GE
;
5339 nw64(MIF_CONFIG
, val
);
5342 val
= nr64_mac(XMAC_CONFIG
);
5343 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
5345 val
|= XMAC_CONFIG_TX_OUTPUT_EN
;
5347 if (lp
->loopback_mode
== LOOPBACK_MAC
) {
5348 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
5349 val
|= XMAC_CONFIG_LOOPBACK
;
5351 val
&= ~XMAC_CONFIG_LOOPBACK
;
5354 if (np
->flags
& NIU_FLAGS_10G
) {
5355 val
&= ~XMAC_CONFIG_LFS_DISABLE
;
5357 val
|= XMAC_CONFIG_LFS_DISABLE
;
5358 if (!(np
->flags
& NIU_FLAGS_FIBER
) &&
5359 !(np
->flags
& NIU_FLAGS_XCVR_SERDES
))
5360 val
|= XMAC_CONFIG_1G_PCS_BYPASS
;
5362 val
&= ~XMAC_CONFIG_1G_PCS_BYPASS
;
5365 val
&= ~XMAC_CONFIG_10G_XPCS_BYPASS
;
5367 if (lp
->active_speed
== SPEED_100
)
5368 val
|= XMAC_CONFIG_SEL_CLK_25MHZ
;
5370 val
&= ~XMAC_CONFIG_SEL_CLK_25MHZ
;
5372 nw64_mac(XMAC_CONFIG
, val
);
5374 val
= nr64_mac(XMAC_CONFIG
);
5375 val
&= ~XMAC_CONFIG_MODE_MASK
;
5376 if (np
->flags
& NIU_FLAGS_10G
) {
5377 val
|= XMAC_CONFIG_MODE_XGMII
;
5379 if (lp
->active_speed
== SPEED_1000
)
5380 val
|= XMAC_CONFIG_MODE_GMII
;
5382 val
|= XMAC_CONFIG_MODE_MII
;
5385 nw64_mac(XMAC_CONFIG
, val
);
5388 static void niu_init_xif_bmac(struct niu
*np
)
5390 struct niu_link_config
*lp
= &np
->link_config
;
5393 val
= BMAC_XIF_CONFIG_TX_OUTPUT_EN
;
5395 if (lp
->loopback_mode
== LOOPBACK_MAC
)
5396 val
|= BMAC_XIF_CONFIG_MII_LOOPBACK
;
5398 val
&= ~BMAC_XIF_CONFIG_MII_LOOPBACK
;
5400 if (lp
->active_speed
== SPEED_1000
)
5401 val
|= BMAC_XIF_CONFIG_GMII_MODE
;
5403 val
&= ~BMAC_XIF_CONFIG_GMII_MODE
;
5405 val
&= ~(BMAC_XIF_CONFIG_LINK_LED
|
5406 BMAC_XIF_CONFIG_LED_POLARITY
);
5408 if (!(np
->flags
& NIU_FLAGS_10G
) &&
5409 !(np
->flags
& NIU_FLAGS_FIBER
) &&
5410 lp
->active_speed
== SPEED_100
)
5411 val
|= BMAC_XIF_CONFIG_25MHZ_CLOCK
;
5413 val
&= ~BMAC_XIF_CONFIG_25MHZ_CLOCK
;
5415 nw64_mac(BMAC_XIF_CONFIG
, val
);
5418 static void niu_init_xif(struct niu
*np
)
5420 if (np
->flags
& NIU_FLAGS_XMAC
)
5421 niu_init_xif_xmac(np
);
5423 niu_init_xif_bmac(np
);
static void niu_pcs_mii_reset(struct niu *np)
{
	int limit = 1000;
	u64 val = nr64_pcs(PCS_MII_CTL);

	val |= PCS_MII_CTL_RST;
	nw64_pcs(PCS_MII_CTL, val);
	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
		udelay(100);
		val = nr64_pcs(PCS_MII_CTL);
	}
}

static void niu_xpcs_reset(struct niu *np)
{
	int limit = 1000;
	u64 val = nr64_xpcs(XPCS_CONTROL1);

	val |= XPCS_CONTROL1_RESET;
	nw64_xpcs(XPCS_CONTROL1, val);
	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
		udelay(100);
		val = nr64_xpcs(XPCS_CONTROL1);
	}
}
static int niu_init_pcs(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	switch (np->flags & (NIU_FLAGS_10G |
			     NIU_FLAGS_FIBER |
			     NIU_FLAGS_XCVR_SERDES)) {
	case NIU_FLAGS_FIBER:
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		niu_pcs_mii_reset(np);
		break;

	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		if (!(np->flags & NIU_FLAGS_XMAC))
			return -EINVAL;

		/* 10G copper or fiber */
		val = nr64_mac(XMAC_CONFIG);
		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
		nw64_mac(XMAC_CONFIG, val);

		val = nr64_xpcs(XPCS_CONTROL1);
		if (lp->loopback_mode == LOOPBACK_PHY)
			val |= XPCS_CONTROL1_LOOPBACK;
		else
			val &= ~XPCS_CONTROL1_LOOPBACK;
		nw64_xpcs(XPCS_CONTROL1, val);

		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
		break;

	case NIU_FLAGS_XCVR_SERDES:
		niu_pcs_mii_reset(np);
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		break;

	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		/* 1G RGMII FIBER */
		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
		niu_pcs_mii_reset(np);
		break;
	}

	return 0;
}
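/* MAC reset helpers.  Each one sets the software-reset bit(s) in the
 * corresponding MAC and polls until the hardware clears them, logging
 * an error if the reset does not complete.
 */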
static int niu_reset_tx_xmac(struct niu *np)
{
	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
					  (XTXMAC_SW_RST_REG_RS |
					   XTXMAC_SW_RST_SOFT_RST),
					  1000, 100, "XTXMAC_SW_RST");
}

static int niu_reset_tx_bmac(struct niu *np)
{
	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
	while (--limit >= 0) {
		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
			break;
	}
	dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
		"BTXMAC_SW_RST[%llx]\n",
		(unsigned long long) nr64_mac(BTXMAC_SW_RST));
}

static int niu_reset_tx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_tx_xmac(np);
	else
		return niu_reset_tx_bmac(np);
}
static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	val = nr64_mac(XMAC_MIN);
	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
		 XMAC_MIN_RX_MIN_PKT_SIZE);
	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
	nw64_mac(XMAC_MIN, val);

	nw64_mac(XMAC_MAX, max);

	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);

	val = nr64_mac(XMAC_IPG);
	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_IPG_IPG_XGMII;
		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
	} else {
		val &= ~XMAC_IPG_IPG_MII_GMII;
		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
	}
	nw64_mac(XMAC_IPG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
		 XMAC_CONFIG_STRETCH_MODE |
		 XMAC_CONFIG_VAR_MIN_IPG_EN |
		 XMAC_CONFIG_TX_ENABLE);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(TXMAC_FRM_CNT, 0);
	nw64_mac(TXMAC_BYTE_CNT, 0);
}
static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	nw64_mac(BMAC_MIN_FRAME, min);
	nw64_mac(BMAC_MAX_FRAME, max);

	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
	nw64_mac(BMAC_PREAMBLE_SIZE, 7);

	val = nr64_mac(BTXMAC_CONFIG);
	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
		 BTXMAC_CONFIG_ENABLE);
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_init_tx_mac(struct niu *np)
{
	if (np->dev->mtu > ETH_DATA_LEN)

	/* The XMAC_MIN register only accepts values for TX min which
	 * have the low 3 bits cleared.
	 */
	BUILD_BUG_ON(min & 0x7);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_tx_xmac(np, min, max);
	else
		niu_init_tx_bmac(np, min, max);
}
static int niu_reset_rx_xmac(struct niu *np)
{
	nw64_mac(XRXMAC_SW_RST,
		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
	while (--limit >= 0) {
		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
						 XRXMAC_SW_RST_SOFT_RST)))
			break;
	}
	dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
		"XRXMAC_SW_RST[%llx]\n",
		(unsigned long long) nr64_mac(XRXMAC_SW_RST));
}

static int niu_reset_rx_bmac(struct niu *np)
{
	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
	while (--limit >= 0) {
		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
			break;
	}
	dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
		"BRXMAC_SW_RST[%llx]\n",
		(unsigned long long) nr64_mac(BRXMAC_SW_RST));
}

static int niu_reset_rx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_rx_xmac(np);
	else
		return niu_reset_rx_bmac(np);
}
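/* RX MAC bring-up: clear the address filters and multicast hash table,
 * bind the primary and multicast addresses to this port's RDC table,
 * mask the RX stat interrupts, program the config register and zero
 * the receive counters.
 */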
static void niu_init_rx_xmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;

	nw64_mac(XMAC_ADD_FILT0, 0);
	nw64_mac(XMAC_ADD_FILT1, 0);
	nw64_mac(XMAC_ADD_FILT2, 0);
	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(XMAC_HASH_TBL(i), 0);
	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
		 XMAC_CONFIG_PROMISCUOUS |
		 XMAC_CONFIG_PROMISC_GROUP |
		 XMAC_CONFIG_ERR_CHK_DIS |
		 XMAC_CONFIG_RX_CRC_CHK_DIS |
		 XMAC_CONFIG_RESERVED_MULTICAST |
		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
		 XMAC_CONFIG_ADDR_FILTER_EN |
		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
		 XMAC_CONFIG_STRIP_CRC |
		 XMAC_CONFIG_PASS_FLOW_CTRL |
		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
	val |= (XMAC_CONFIG_HASH_FILTER_EN);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(RXMAC_BT_CNT, 0);
	nw64_mac(RXMAC_BC_FRM_CNT, 0);
	nw64_mac(RXMAC_MC_FRM_CNT, 0);
	nw64_mac(RXMAC_FRAG_CNT, 0);
	nw64_mac(RXMAC_HIST_CNT1, 0);
	nw64_mac(RXMAC_HIST_CNT2, 0);
	nw64_mac(RXMAC_HIST_CNT3, 0);
	nw64_mac(RXMAC_HIST_CNT4, 0);
	nw64_mac(RXMAC_HIST_CNT5, 0);
	nw64_mac(RXMAC_HIST_CNT6, 0);
	nw64_mac(RXMAC_HIST_CNT7, 0);
	nw64_mac(RXMAC_MPSZER_CNT, 0);
	nw64_mac(RXMAC_CRC_ER_CNT, 0);
	nw64_mac(RXMAC_CD_VIO_CNT, 0);
	nw64_mac(LINK_FAULT_CNT, 0);
}
static void niu_init_rx_bmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;

	nw64_mac(BMAC_ADD_FILT0, 0);
	nw64_mac(BMAC_ADD_FILT1, 0);
	nw64_mac(BMAC_ADD_FILT2, 0);
	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(BMAC_HASH_TBL(i), 0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);

	val = nr64_mac(BRXMAC_CONFIG);
	val &= ~(BRXMAC_CONFIG_ENABLE |
		 BRXMAC_CONFIG_STRIP_PAD |
		 BRXMAC_CONFIG_STRIP_FCS |
		 BRXMAC_CONFIG_PROMISC |
		 BRXMAC_CONFIG_PROMISC_GRP |
		 BRXMAC_CONFIG_ADDR_FILT_EN |
		 BRXMAC_CONFIG_DISCARD_DIS);
	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
	nw64_mac(BRXMAC_CONFIG, val);

	val = nr64_mac(BMAC_ADDR_CMPEN);
	val |= BMAC_ADDR_CMPEN_EN0;
	nw64_mac(BMAC_ADDR_CMPEN, val);
}

static void niu_init_rx_mac(struct niu *np)
{
	niu_set_primary_mac(np, np->dev->dev_addr);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_rx_xmac(np);
	else
		niu_init_rx_bmac(np);
}
static void niu_enable_tx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	if (on)
		val |= XMAC_CONFIG_TX_ENABLE;
	else
		val &= ~XMAC_CONFIG_TX_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_tx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BTXMAC_CONFIG);

	if (on)
		val |= BTXMAC_CONFIG_ENABLE;
	else
		val &= ~BTXMAC_CONFIG_ENABLE;
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_enable_tx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_tx_xmac(np, on);
	else
		niu_enable_tx_bmac(np, on);
}

static void niu_enable_rx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
		 XMAC_CONFIG_PROMISCUOUS);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= XMAC_CONFIG_HASH_FILTER_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= XMAC_CONFIG_PROMISCUOUS;

	if (on)
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
	else
		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_rx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BRXMAC_CONFIG);

	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
		 BRXMAC_CONFIG_PROMISC);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= BRXMAC_CONFIG_HASH_FILT_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= BRXMAC_CONFIG_PROMISC;

	if (on)
		val |= BRXMAC_CONFIG_ENABLE;
	else
		val &= ~BRXMAC_CONFIG_ENABLE;
	nw64_mac(BRXMAC_CONFIG, val);
}

static void niu_enable_rx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_rx_xmac(np, on);
	else
		niu_enable_rx_bmac(np, on);
}
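/* Full MAC initialization for one port: set up the PCS layer, reset and
 * program the TX and RX MACs, then enable both directions.  The TX MAC
 * is programmed a second time because the RX MAC reset clobbers part of
 * its state (see the comment in the function body).
 */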
static int niu_init_mac(struct niu *np)
{
	int err;

	err = niu_init_pcs(np);
	if (err)
		return err;
	err = niu_reset_tx_mac(np);
	if (err)
		return err;
	niu_init_tx_mac(np);
	err = niu_reset_rx_mac(np);
	if (err)
		return err;
	niu_init_rx_mac(np);

	/* This looks hookey but the RX MAC reset we just did will
	 * undo some of the state we set up in niu_init_tx_mac(), so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to its default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}
static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}

static void niu_stop_tx_channels(struct niu *np)
{
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_stop_one_tx_channel(np, rp);
	}
}

static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}

static void niu_reset_tx_channels(struct niu *np)
{
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_reset_one_tx_channel(np, rp);
	}
}

static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
}

static void niu_stop_rx_channels(struct niu *np)
{
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_stop_one_rx_channel(np, rp);
	}
}

static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}

static void niu_reset_rx_channels(struct niu *np)
{
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_reset_one_rx_channel(np, rp);
	}
}
static void niu_disable_ipp(struct niu *np)
{
	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		dev_err(np->device, PFX "%s: IPP would not quiesce, "
			"rd_ptr[%llx] wr_ptr[%llx]\n",
			(unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
			(unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}
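/* One-shot hardware bring-up used by niu_open() and the reset task:
 * TX controller, TX and RX DMA channels, classifier, ZCP, IPP and
 * finally the MAC.  On failure the already-initialized blocks are torn
 * down in reverse order via the out_uninit_* labels.
 */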
static int niu_init_hw(struct niu *np)
{
	int i, err;

	niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
	}

	niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
	niu_disable_ipp(np);

out_uninit_rx_channels:
	niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}
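/* Quiesce the chip in roughly the reverse order of niu_init_hw():
 * disable interrupts and the RX MAC, shut down IPP, then stop and
 * reset the TX and RX DMA channels.
 */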
static void niu_stop_hw(struct niu *np)
{
	niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
	niu_enable_interrupts(np, 0);

	niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
	niu_enable_rx_mac(np, 0);

	niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
	niu_disable_ipp(np);

	niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);

	niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);

	niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
	niu_reset_tx_channels(np);

	niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
	niu_reset_rx_channels(np);
}
static void niu_set_irq_name(struct niu *np)
{
	int port = np->port;

	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);

	sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
	sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);

	for (i = 0; i < np->num_ldg - j; i++) {
		if (i < np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-rx-%d",
				np->dev->name, i);
		else if (i < np->num_tx_rings + np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
				i - np->num_rx_rings);
	}
}
static int niu_request_irq(struct niu *np)
{
	niu_set_irq_name(np);

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt,
				  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
				  np->irq_name[i], lp);
	}

	/* error unwind: free the IRQs acquired so far */
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
}

static void niu_free_irq(struct niu *np)
{
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		free_irq(lp->irq, lp);
	}
}

static void niu_enable_napi(struct niu *np)
{
	for (i = 0; i < np->num_ldg; i++)
		napi_enable(&np->ldg[i].napi);
}

static void niu_disable_napi(struct niu *np)
{
	for (i = 0; i < np->num_ldg; i++)
		napi_disable(&np->ldg[i].napi);
}
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);

	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	init_timer(&np->timer);
	np->timer.expires = jiffies + HZ;
	np->timer.data = (unsigned long) np;
	np->timer.function = niu_timer;

	err = niu_enable_interrupts(np, 1);

	spin_unlock_irq(&np->lock);

	if (err)
		niu_disable_napi(np);

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

out_free_channels:
	niu_free_channels(np);
}
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	spin_unlock_irq(&np->lock);
}

static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_channels(np);

	niu_handle_led(np, 0);
}
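/* Fold the hardware MAC counters into the driver's cumulative software
 * statistics (np->mac_stats); the ethtool statistics path reports these
 * accumulated values.
 */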
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}

static void niu_sync_bmac_stats(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;

	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);

	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
}

static void niu_sync_mac_stats(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_sync_xmac_stats(np);
	else
		niu_sync_bmac_stats(np);
}
static void niu_get_rx_stats(struct niu *np)
{
	unsigned long pkts, dropped, errors, bytes;

	pkts = dropped = errors = bytes = 0;
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_sync_rx_discard_stats(np, rp, 0);

		pkts += rp->rx_packets;
		bytes += rp->rx_bytes;
		dropped += rp->rx_dropped;
		errors += rp->rx_errors;
	}
	np->dev->stats.rx_packets = pkts;
	np->dev->stats.rx_bytes = bytes;
	np->dev->stats.rx_dropped = dropped;
	np->dev->stats.rx_errors = errors;
}

static void niu_get_tx_stats(struct niu *np)
{
	unsigned long pkts, errors, bytes;

	pkts = errors = bytes = 0;
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		pkts += rp->tx_packets;
		bytes += rp->tx_bytes;
		errors += rp->tx_errors;
	}
	np->dev->stats.tx_packets = pkts;
	np->dev->stats.tx_bytes = bytes;
	np->dev->stats.tx_errors = errors;
}

static struct net_device_stats *niu_get_stats(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_get_rx_stats(np);
	niu_get_tx_stats(np);

	return &dev->stats;
}
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
{
	for (i = 0; i < 16; i++)
		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash_bmac(struct niu *np, u16 *hash)
{
	for (i = 0; i < 16; i++)
		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash(struct niu *np, u16 *hash)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_load_hash_xmac(np, hash);
	else
		niu_load_hash_bmac(np, hash);
}
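/* Rebuild RX filtering when the device flags or address lists change:
 * promiscuous and all-multi modes, the alternate unicast address slots,
 * and the 16 x 16-bit multicast hash table indexed by the CRC of each
 * multicast address.
 */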
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct dev_addr_list *addr;
	struct netdev_hw_addr *ha;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
		np->flags |= NIU_FLAGS_MCAST;

	alt_cnt = dev->uc.count;
	if (alt_cnt > niu_num_alt_addr(np)) {
		np->flags |= NIU_FLAGS_PROMISC;
	} else {
		list_for_each_entry(ha, &dev->uc.list, list) {
			err = niu_set_alt_mac(np, index, ha->addr);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "adding alt mac %d\n",
				       dev->name, err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "enabling alt mac %d\n",
				       dev->name, err, index);
		}
	}

	if (np->flags & NIU_FLAGS_XMAC)

	for (i = alt_start; i < niu_num_alt_addr(np); i++) {
		err = niu_enable_alt_mac(np, i, 0);
		if (err)
			printk(KERN_WARNING PFX "%s: Error %d "
			       "disabling alt mac %d\n",
	}

	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
	} else if (dev->mc_count > 0) {
		for (addr = dev->mc_list; addr; addr = addr->next) {
			u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);

			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}
static void niu_netif_stop(struct niu *np)
{
	np->dev->trans_start = jiffies;	/* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}

static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}
static void niu_reset_buffers(struct niu *np)
{
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
			page = rp->rxhash[j];
			while (page) {
				struct page *next =
					(struct page *) page->mapping;
				u64 base = page->index;

				base = base >> RBR_DESCR_ADDR_SHIFT;
				rp->rbr[k++] = cpu_to_le32(base);
				page = next;
			}
		}
		for (; k < MAX_RBR_RING_SIZE; k++) {
			err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
		}

		rp->rbr_index = rp->rbr_table_size - 1;
		rp->rbr_pending = 0;
		rp->rbr_refill_pending = 0;
	}

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		for (j = 0; j < MAX_TX_RING_SIZE; j++) {
			if (rp->tx_buffs[j].skb)
				(void) release_tx_packet(np, rp, j);
		}

		rp->pending = MAX_TX_RING_SIZE;
	}
}
static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);
	if (!netif_running(np->dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	del_timer_sync(&np->timer);

	spin_lock_irqsave(&np->lock, flags);

	spin_unlock_irqrestore(&np->lock, flags);

	niu_reset_buffers(np);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);
}

static void niu_tx_timeout(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
		dev->name);

	schedule_work(&np->reset_task);
}
static void niu_set_txd(struct tx_ring_info *rp, int index,
			u64 mapping, u64 len, u64 mark,
			u64 n_frags)
{
	__le64 *desc = &rp->descr[index];

	*desc = cpu_to_le64(mark |
			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
			    (len << TX_DESC_TR_LEN_SHIFT) |
			    (mapping & TX_DESC_SAD));
}
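/* Build the flags word of the internal TX packet header that is
 * prepended to every outgoing frame: pad length, total length, L3/L4
 * header offsets (in 16-bit words), IHL, LLC/VLAN/IPv6 indicators and
 * the requested L4 checksum type.
 */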
static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
				u64 pad_bytes, u64 len)
{
	u16 eth_proto, eth_proto_inner;
	u64 csum_bits, l3off, ihl, ret;

	eth_proto = be16_to_cpu(ehdr->h_proto);
	eth_proto_inner = eth_proto;
	if (eth_proto == ETH_P_8021Q) {
		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
		__be16 val = vp->h_vlan_encapsulated_proto;

		eth_proto_inner = be16_to_cpu(val);
	}

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		ihl = ip_hdr(skb)->ihl;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		break;
	}

	csum_bits = TXHDR_CSUM_NONE;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		csum_bits = (ip_proto == IPPROTO_TCP ?
			     TXHDR_CSUM_TCP :
			     (ip_proto == IPPROTO_UDP ?
			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));

		start = skb_transport_offset(skb) -
			(pad_bytes + sizeof(struct tx_pkt_hdr));
		stuff = start + skb->csum_offset;

		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
	}

	l3off = skb_network_offset(skb) -
		(pad_bytes + sizeof(struct tx_pkt_hdr));

	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
	       (len << TXHDR_LEN_SHIFT) |
	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
	       (ihl << TXHDR_IHL_SHIFT) |
	       ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
	       (ipv6 ? TXHDR_IP_VER : 0) |
	       csum_bits);

	return ret;
}
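/* Main transmit path: pad short frames, make headroom for the NIU TX
 * packet header, map the linear area and page fragments for DMA, split
 * the data into descriptors of at most MAX_TX_DESC_LEN bytes and then
 * write the ring's kick register.
 */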
static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	unsigned long align, headroom;
	struct netdev_queue *txq;
	struct tx_ring_info *rp;
	struct tx_pkt_hdr *tp;
	unsigned int len, nfg;
	struct ethhdr *ehdr;

	i = skb_get_queue_mapping(skb);
	rp = &np->tx_rings[i];
	txq = netdev_get_tx_queue(dev, i);

	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_tx_stop_queue(txq);
		dev_err(np->device, PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	if (skb->len < ETH_ZLEN) {
		unsigned int pad_bytes = ETH_ZLEN - skb->len;

		if (skb_pad(skb, pad_bytes))
			goto out;
		skb_put(skb, pad_bytes);
	}

	len = sizeof(struct tx_pkt_hdr) + 15;
	if (skb_headroom(skb) < len) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, len);
	}

	align = ((unsigned long) skb->data & (16 - 1));
	headroom = align + sizeof(struct tx_pkt_hdr);

	ehdr = (struct ethhdr *) skb->data;
	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);

	len = skb->len - sizeof(struct tx_pkt_hdr);
	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));

	len = skb_headlen(skb);
	mapping = np->ops->map_single(np->device, skb->data,
				      len, DMA_TO_DEVICE);

	rp->tx_buffs[prod].skb = skb;
	rp->tx_buffs[prod].mapping = mapping;

	if (++rp->mark_counter == rp->mark_freq) {
		rp->mark_counter = 0;
		mrk |= TX_DESC_MARK;
	}

	nfg = skb_shinfo(skb)->nr_frags;
	while (tlen > 0) {
		tlen -= MAX_TX_DESC_LEN;
	}

	while (len > 0) {
		unsigned int this_len = len;

		if (this_len > MAX_TX_DESC_LEN)
			this_len = MAX_TX_DESC_LEN;

		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);

		prod = NEXT_TX(rp, prod);
		mapping += this_len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = np->ops->map_page(np->device, frag->page,
					    frag->page_offset, len,
					    DMA_TO_DEVICE);

		rp->tx_buffs[prod].skb = NULL;
		rp->tx_buffs[prod].mapping = mapping;

		niu_set_txd(rp, prod, mapping, len, 0, 0);

		prod = NEXT_TX(rp, prod);
	}

	if (prod < rp->prod)
		rp->wrap_bit ^= TX_RING_KICK_WRAP;

	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));

	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
static int niu_change_mtu(struct net_device *dev, int new_mtu)
{
	struct niu *np = netdev_priv(dev);
	int err, orig_jumbo, new_jumbo;

	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
		return -EINVAL;

	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
	new_jumbo = (new_mtu > ETH_DATA_LEN);

	if (!netif_running(dev) ||
	    (orig_jumbo == new_jumbo))
		return 0;

	niu_full_shutdown(np, dev);

	niu_free_channels(np);

	niu_enable_napi(np);

	err = niu_alloc_channels(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
	}

	spin_unlock_irq(&np->lock);

	if (!err) {
		netif_tx_start_all_queues(dev);
		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
			netif_carrier_on(dev);

		add_timer(&np->timer);
	}

	return err;
}
static void niu_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct niu *np = netdev_priv(dev);
	struct niu_vpd *vpd = &np->vpd;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	sprintf(info->fw_version, "%d.%d",
		vpd->fcode_major, vpd->fcode_minor);
	if (np->parent->plat_type != PLAT_TYPE_NIU)
		strcpy(info->bus_info, pci_name(np->pdev));
}

static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp;

	lp = &np->link_config;

	memset(cmd, 0, sizeof(*cmd));
	cmd->phy_address = np->phy_addr;
	cmd->supported = lp->supported;
	cmd->advertising = lp->active_advertising;
	cmd->autoneg = lp->active_autoneg;
	cmd->speed = lp->active_speed;
	cmd->duplex = lp->active_duplex;
	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;

	return 0;
}

static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = cmd->advertising;
	lp->speed = cmd->speed;
	lp->duplex = cmd->duplex;
	lp->autoneg = cmd->autoneg;

	return niu_init_link(np);
}
static u32 niu_get_msglevel(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	return np->msg_enable;
}

static void niu_set_msglevel(struct net_device *dev, u32 value)
{
	struct niu *np = netdev_priv(dev);
	np->msg_enable = value;
}

static int niu_nway_reset(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	if (np->link_config.autoneg)
		return niu_init_link(np);

	return 0;
}

static int niu_get_eeprom_len(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	return np->eeprom_len;
}

static int niu_get_eeprom(struct net_device *dev,
			  struct ethtool_eeprom *eeprom, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	u32 offset, len, val;

	offset = eeprom->offset;

	if (offset + len < offset)
		return -EINVAL;
	if (offset >= np->eeprom_len)
		return -EINVAL;
	if (offset + len > np->eeprom_len)
		len = eeprom->len = np->eeprom_len - offset;

	if (offset & 3) {
		u32 b_offset, b_count;

		b_offset = offset & 3;
		b_count = 4 - b_offset;

		val = nr64(ESPC_NCR((offset - b_offset) / 4));
		memcpy(data, ((char *)&val) + b_offset, b_count);
	}
	while (len >= 4) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, 4);
	}
	if (len) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, len);
	}
	return 0;
}
static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
{
	switch (flow_type) {
		*pid = IPPROTO_SCTP;
	}
}

static int niu_class_to_ethflow(u64 class, int *flow_type)
{
	switch (class) {
	case CLASS_CODE_TCP_IPV4:
		*flow_type = TCP_V4_FLOW;
		break;
	case CLASS_CODE_UDP_IPV4:
		*flow_type = UDP_V4_FLOW;
		break;
	case CLASS_CODE_AH_ESP_IPV4:
		*flow_type = AH_V4_FLOW;
		break;
	case CLASS_CODE_SCTP_IPV4:
		*flow_type = SCTP_V4_FLOW;
		break;
	case CLASS_CODE_TCP_IPV6:
		*flow_type = TCP_V6_FLOW;
		break;
	case CLASS_CODE_UDP_IPV6:
		*flow_type = UDP_V6_FLOW;
		break;
	case CLASS_CODE_AH_ESP_IPV6:
		*flow_type = AH_V6_FLOW;
		break;
	case CLASS_CODE_SCTP_IPV6:
		*flow_type = SCTP_V6_FLOW;
		break;
	case CLASS_CODE_USER_PROG1:
	case CLASS_CODE_USER_PROG2:
	case CLASS_CODE_USER_PROG3:
	case CLASS_CODE_USER_PROG4:
		*flow_type = IP_USER_FLOW;
		break;
	}
}

static int niu_ethflow_to_class(int flow_type, u64 *class)
{
	switch (flow_type) {
		*class = CLASS_CODE_TCP_IPV4;
		*class = CLASS_CODE_UDP_IPV4;
		*class = CLASS_CODE_AH_ESP_IPV4;
		*class = CLASS_CODE_SCTP_IPV4;
		*class = CLASS_CODE_TCP_IPV6;
		*class = CLASS_CODE_UDP_IPV6;
		*class = CLASS_CODE_AH_ESP_IPV6;
		*class = CLASS_CODE_SCTP_IPV6;
	}
}
niu_flowkey_to_ethflow(u64 flow_key
)
7073 if (flow_key
& FLOW_KEY_L2DA
)
7074 ethflow
|= RXH_L2DA
;
7075 if (flow_key
& FLOW_KEY_VLAN
)
7076 ethflow
|= RXH_VLAN
;
7077 if (flow_key
& FLOW_KEY_IPSA
)
7078 ethflow
|= RXH_IP_SRC
;
7079 if (flow_key
& FLOW_KEY_IPDA
)
7080 ethflow
|= RXH_IP_DST
;
7081 if (flow_key
& FLOW_KEY_PROTO
)
7082 ethflow
|= RXH_L3_PROTO
;
7083 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
))
7084 ethflow
|= RXH_L4_B_0_1
;
7085 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
))
7086 ethflow
|= RXH_L4_B_2_3
;
7092 static int niu_ethflow_to_flowkey(u64 ethflow
, u64
*flow_key
)
7096 if (ethflow
& RXH_L2DA
)
7097 key
|= FLOW_KEY_L2DA
;
7098 if (ethflow
& RXH_VLAN
)
7099 key
|= FLOW_KEY_VLAN
;
7100 if (ethflow
& RXH_IP_SRC
)
7101 key
|= FLOW_KEY_IPSA
;
7102 if (ethflow
& RXH_IP_DST
)
7103 key
|= FLOW_KEY_IPDA
;
7104 if (ethflow
& RXH_L3_PROTO
)
7105 key
|= FLOW_KEY_PROTO
;
7106 if (ethflow
& RXH_L4_B_0_1
)
7107 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
);
7108 if (ethflow
& RXH_L4_B_2_3
)
7109 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
);
static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
	u64 class;

	if (!niu_ethflow_to_class(nfc->flow_type, &class))
		return -EINVAL;

	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
	    TCAM_KEY_DISC)
		nfc->data = RXH_DISCARD;
	else
		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
						       CLASS_CODE_USER_PROG1]);

	return 0;
}
static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
					struct ethtool_rx_flow_spec *fsp)
{
	fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >>
		TCAM_V4KEY3_SADDR_SHIFT;
	fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >>
		TCAM_V4KEY3_DADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >>
		TCAM_V4KEY3_SADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >>
		TCAM_V4KEY3_DADDR_SHIFT;

	fsp->h_u.tcp_ip4_spec.ip4src =
		cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src);
	fsp->m_u.tcp_ip4_spec.ip4src =
		cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src);
	fsp->h_u.tcp_ip4_spec.ip4dst =
		cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst);
	fsp->m_u.tcp_ip4_spec.ip4dst =
		cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst);

	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;
	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;

	switch (fsp->flow_type) {
		fsp->h_u.tcp_ip4_spec.psrc =
			((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
		fsp->h_u.tcp_ip4_spec.pdst =
			((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
		fsp->m_u.tcp_ip4_spec.psrc =
			((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
		fsp->m_u.tcp_ip4_spec.pdst =
			((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;

		fsp->h_u.tcp_ip4_spec.psrc =
			cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc);
		fsp->h_u.tcp_ip4_spec.pdst =
			cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst);
		fsp->m_u.tcp_ip4_spec.psrc =
			cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc);
		fsp->m_u.tcp_ip4_spec.pdst =
			cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst);

		fsp->h_u.ah_ip4_spec.spi =
			(tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->m_u.ah_ip4_spec.spi =
			(tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;

		fsp->h_u.ah_ip4_spec.spi =
			cpu_to_be32(fsp->h_u.ah_ip4_spec.spi);
		fsp->m_u.ah_ip4_spec.spi =
			cpu_to_be32(fsp->m_u.ah_ip4_spec.spi);

		fsp->h_u.usr_ip4_spec.l4_4_bytes =
			(tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->m_u.usr_ip4_spec.l4_4_bytes =
			(tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;

		fsp->h_u.usr_ip4_spec.l4_4_bytes =
			cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes);
		fsp->m_u.usr_ip4_spec.l4_4_bytes =
			cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes);

		fsp->h_u.usr_ip4_spec.proto =
			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;
		fsp->m_u.usr_ip4_spec.proto =
			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;

		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
	}
}
static int niu_get_ethtool_tcam_entry(struct niu *np,
				      struct ethtool_rxnfc *nfc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;

	idx = tcam_get_index(np, (u16)nfc->fs.location);

	tp = &parent->tcam[idx];
	if (!tp->valid) {
		pr_info(PFX "niu%d: %s entry [%d] invalid for idx[%d]\n",
			parent->index, np->dev->name,
			(u16)nfc->fs.location, idx);
		return -EINVAL;
	}

	/* fill the flow spec entry */
	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
		TCAM_V4KEY0_CLASS_CODE_SHIFT;
	ret = niu_class_to_ethflow(class, &fsp->flow_type);
	if (ret < 0) {
		pr_info(PFX "niu%d: %s niu_class_to_ethflow failed\n",
			parent->index, np->dev->name);
		return ret;
	}

	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;
		if (proto == IPPROTO_ESP) {
			if (fsp->flow_type == AH_V4_FLOW)
				fsp->flow_type = ESP_V4_FLOW;
			else
				fsp->flow_type = ESP_V6_FLOW;
		}
	}

	switch (fsp->flow_type) {
		niu_get_ip4fs_from_tcam_key(tp, fsp);

		/* Not yet implemented */

		niu_get_ip4fs_from_tcam_key(tp, fsp);
	}

	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
			TCAM_ASSOCDATA_OFFSET_SHIFT;

	/* put the tcam size here */
	nfc->data = tcam_get_size(np);

	return 0;
}

static int niu_get_ethtool_tcam_all(struct niu *np,
				    struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	unsigned long flags;

	/* put the tcam size here */
	nfc->data = tcam_get_size(np);

	niu_lock_parent(np, flags);
	n_entries = nfc->rule_cnt;
	for (cnt = 0, i = 0; i < nfc->data; i++) {
		idx = tcam_get_index(np, i);
		tp = &parent->tcam[idx];
	}
	niu_unlock_parent(np, flags);

	if (n_entries != cnt) {
		/* print warning, this should not happen */
		pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, "
			"n_entries[%d] != cnt[%d]!!!\n\n",
			np->parent->index, np->dev->name, n_entries, cnt);
	}

	return 0;
}
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
		       void *rule_locs)
{
	struct niu *np = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = niu_get_hash_opts(np, cmd);
		break;
	case ETHTOOL_GRXRINGS:
		cmd->data = np->num_rx_rings;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = niu_get_ethtool_tcam_entry(np, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
*np
, struct ethtool_rxnfc
*nfc
)
7380 unsigned long flags
;
7382 if (!niu_ethflow_to_class(nfc
->flow_type
, &class))
7385 if (class < CLASS_CODE_USER_PROG1
||
7386 class > CLASS_CODE_SCTP_IPV6
)
7389 if (nfc
->data
& RXH_DISCARD
) {
7390 niu_lock_parent(np
, flags
);
7391 flow_key
= np
->parent
->tcam_key
[class -
7392 CLASS_CODE_USER_PROG1
];
7393 flow_key
|= TCAM_KEY_DISC
;
7394 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
7395 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
7396 niu_unlock_parent(np
, flags
);
7399 /* Discard was set before, but is not set now */
7400 if (np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] &
7402 niu_lock_parent(np
, flags
);
7403 flow_key
= np
->parent
->tcam_key
[class -
7404 CLASS_CODE_USER_PROG1
];
7405 flow_key
&= ~TCAM_KEY_DISC
;
7406 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
),
7408 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] =
7410 niu_unlock_parent(np
, flags
);
7414 if (!niu_ethflow_to_flowkey(nfc
->data
, &flow_key
))
7417 niu_lock_parent(np
, flags
);
7418 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
7419 np
->parent
->flow_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
7420 niu_unlock_parent(np
, flags
);
static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
				       struct niu_tcam_entry *tp,
				       int l2_rdc_tab, u64 class)
{
	u32 sip, dip, sipm, dipm, spi, spim;
	u16 sport, dport, spm, dpm;

	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);

	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;

	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;

	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
	tp->key_mask[3] |= dipm;

	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
		       TCAM_V4KEY2_TOS_SHIFT);
	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
			    TCAM_V4KEY2_TOS_SHIFT);
	switch (fsp->flow_type) {
		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);

		tp->key[2] |= (((u64)sport << 16) | dport);
		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
		niu_ethflow_to_l3proto(fsp->flow_type, &pid);

		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);

		tp->key_mask[2] |= spim;
		niu_ethflow_to_l3proto(fsp->flow_type, &pid);

		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);

		tp->key_mask[2] |= spim;
		pid = fsp->h_u.usr_ip4_spec.proto;
	}

	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);

	tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
}
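/* Insert an ethtool flow-classification rule into the TCAM.  IP_USER
 * flows first claim (or reuse) one of the programmable L3 class codes;
 * the TCAM key/mask and the associated-data word (target RDC offset or
 * discard) are then written for the chosen entry.
 */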
static int niu_add_ethtool_tcam_entry(struct niu *np,
				      struct ethtool_rxnfc *nfc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
	int l2_rdc_table = rdc_table->first_table_num;
	unsigned long flags;

	idx = nfc->fs.location;
	if (idx >= tcam_get_size(np))
		return -EINVAL;

	if (fsp->flow_type == IP_USER_FLOW) {
		int add_usr_cls = 0;
		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;

		niu_lock_parent(np, flags);

		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
			if (parent->l3_cls[i]) {
				if (uspec->proto == parent->l3_cls_pid[i]) {
					class = parent->l3_cls[i];
					parent->l3_cls_refcnt[i]++;
				}
			} else {
				/* Program new user IP class */
				class = CLASS_CODE_USER_PROG1;
				class = CLASS_CODE_USER_PROG2;
				class = CLASS_CODE_USER_PROG3;
				class = CLASS_CODE_USER_PROG4;

				if (uspec->ip_ver == ETH_RX_NFC_IP6)

				ret = tcam_user_ip_class_set(np, class, ipv6,

				ret = tcam_user_ip_class_enable(np, class, 1);

				parent->l3_cls[i] = class;
				parent->l3_cls_pid[i] = uspec->proto;
				parent->l3_cls_refcnt[i]++;
			}
		}

		pr_info(PFX "niu%d: %s niu_add_ethtool_tcam_entry: "
			"Could not find/insert class for pid %d\n",
			parent->index, np->dev->name, uspec->proto);

		niu_unlock_parent(np, flags);
	}

	if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
		return -EINVAL;
	}

	niu_lock_parent(np, flags);

	idx = tcam_get_index(np, idx);
	tp = &parent->tcam[idx];

	memset(tp, 0, sizeof(*tp));

	/* fill in the tcam key and mask */
	switch (fsp->flow_type) {
		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);

		/* Not yet implemented */
		pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
			"flow %d for IPv6 not implemented\n\n",
			parent->index, np->dev->name, fsp->flow_type);

		if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) {
			niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
						   class);
		} else {
			/* Not yet implemented */
			pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
				"usr flow for IPv6 not implemented\n\n",
				parent->index, np->dev->name);
		}

		pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
			"Unknown flow type %d\n\n",
			parent->index, np->dev->name, fsp->flow_type);
	}

	/* fill in the assoc data */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		tp->assoc_data = TCAM_ASSOCDATA_DISC;
	} else {
		if (fsp->ring_cookie >= np->num_rx_rings) {
			pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
				"Invalid RX ring %lld\n\n",
				parent->index, np->dev->name,
				(long long) fsp->ring_cookie);
		}

		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
				  (fsp->ring_cookie <<
				   TCAM_ASSOCDATA_OFFSET_SHIFT));
	}

	err = tcam_write(np, idx, tp->key, tp->key_mask);

	err = tcam_assoc_write(np, idx, tp->assoc_data);

	/* validate the entry */
	np->clas.tcam_valid_entries++;

	niu_unlock_parent(np, flags);
}
static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	unsigned long flags;

	if (loc >= tcam_get_size(np))
		return -EINVAL;

	niu_lock_parent(np, flags);

	idx = tcam_get_index(np, loc);
	tp = &parent->tcam[idx];

	/* if the entry is of a user defined class, then update */
	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
		TCAM_V4KEY0_CLASS_CODE_SHIFT;

	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
			if (parent->l3_cls[i] == class) {
				parent->l3_cls_refcnt[i]--;
				if (!parent->l3_cls_refcnt[i]) {
					ret = tcam_user_ip_class_enable(np,
									class,
									0);
					parent->l3_cls[i] = 0;
					parent->l3_cls_pid[i] = 0;
				}
			}
		}
		if (i == NIU_L3_PROG_CLS) {
			pr_info(PFX "niu%d: %s In niu_del_ethtool_tcam_entry,"
				"Usr class 0x%llx not found \n",
				parent->index, np->dev->name,
				(unsigned long long) class);
		}
	}

	ret = tcam_flush(np, idx);

	/* invalidate the entry */
	np->clas.tcam_valid_entries--;

	niu_unlock_parent(np, flags);
}
static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct niu *np = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = niu_set_hash_opts(np, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		ret = niu_add_ethtool_tcam_entry(np, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_xmac_stat_keys[] = {
	{ "tx_fifo_errors" },
	{ "tx_overflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "tx_underflow_errors" },
	{ "rx_local_faults" },
	{ "rx_remote_faults" },
	{ "rx_link_faults" },
	{ "rx_align_errors" },
	{ "rx_code_violations" },
	{ "rx_len_errors" },
	{ "rx_crc_errors" },
	{ "rx_underflows" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_bmac_stat_keys[] = {
	{ "tx_underflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "rx_align_errors" },
	{ "rx_crc_errors" },
	{ "rx_len_errors" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_rxchan_stat_keys[] = {
};

#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_txchan_stat_keys[] = {
};

#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct niu *np = netdev_priv(dev);

	if (stringset != ETH_SS_STATS)
		return;

	if (np->flags & NIU_FLAGS_XMAC) {
		memcpy(data, niu_xmac_stat_keys,
		       sizeof(niu_xmac_stat_keys));
		data += sizeof(niu_xmac_stat_keys);
	} else {
		memcpy(data, niu_bmac_stat_keys,
		       sizeof(niu_bmac_stat_keys));
		data += sizeof(niu_bmac_stat_keys);
	}
	for (i = 0; i < np->num_rx_rings; i++) {
		memcpy(data, niu_rxchan_stat_keys,
		       sizeof(niu_rxchan_stat_keys));
		data += sizeof(niu_rxchan_stat_keys);
	}
	for (i = 0; i < np->num_tx_rings; i++) {
		memcpy(data, niu_txchan_stat_keys,
		       sizeof(niu_txchan_stat_keys));
		data += sizeof(niu_txchan_stat_keys);
	}
}

static int niu_get_stats_count(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	return ((np->flags & NIU_FLAGS_XMAC ?
		 NUM_XMAC_STAT_KEYS :
		 NUM_BMAC_STAT_KEYS) +
		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS));
}
static void niu_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct niu *np = netdev_priv(dev);

	niu_sync_mac_stats(np);
	if (np->flags & NIU_FLAGS_XMAC) {
		memcpy(data, &np->mac_stats.xmac,
		       sizeof(struct niu_xmac_stats));
		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
	} else {
		memcpy(data, &np->mac_stats.bmac,
		       sizeof(struct niu_bmac_stats));
		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
	}
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_sync_rx_discard_stats(np, rp, 0);

		data[0] = rp->rx_channel;
		data[1] = rp->rx_packets;
		data[2] = rp->rx_bytes;
		data[3] = rp->rx_dropped;
		data[4] = rp->rx_errors;
	}
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		data[0] = rp->tx_channel;
		data[1] = rp->tx_packets;
		data[2] = rp->tx_bytes;
		data[3] = rp->tx_errors;
	}
}
static u64 niu_led_state_save(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return nr64_mac(XMAC_CONFIG);
	else
		return nr64_mac(BMAC_XIF_CONFIG);
}

static void niu_led_state_restore(struct niu *np, u64 val)
{
	if (np->flags & NIU_FLAGS_XMAC)
		nw64_mac(XMAC_CONFIG, val);
	else
		nw64_mac(BMAC_XIF_CONFIG, val);
}

static void niu_force_led(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC) {
		bit = XMAC_CONFIG_FORCE_LED_ON;
	} else {
		reg = BMAC_XIF_CONFIG;
		bit = BMAC_XIF_CONFIG_LINK_LED;
	}

	val = nr64_mac(reg);
}

static int niu_phys_id(struct net_device *dev, u32 data)
{
	struct niu *np = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	orig_led_state = niu_led_state_save(np);
	for (i = 0; i < (data * 2); i++) {
		int on = ((i % 2) == 0);

		niu_force_led(np, on);

		if (msleep_interruptible(500))
			break;
	}
	niu_led_state_restore(np, orig_led_state);

	return 0;
}
static const struct ethtool_ops niu_ethtool_ops = {
	.get_drvinfo		= niu_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= niu_get_msglevel,
	.set_msglevel		= niu_set_msglevel,
	.nway_reset		= niu_nway_reset,
	.get_eeprom_len		= niu_get_eeprom_len,
	.get_eeprom		= niu_get_eeprom,
	.get_settings		= niu_get_settings,
	.set_settings		= niu_set_settings,
	.get_strings		= niu_get_strings,
	.get_stats_count	= niu_get_stats_count,
	.get_ethtool_stats	= niu_get_ethtool_stats,
	.phys_id		= niu_phys_id,
	.get_rxnfc		= niu_get_nfc,
	.set_rxnfc		= niu_set_nfc,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
			      int ldg, int ldn)
{
	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
		return -EINVAL;
	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	parent->ldg_map[ldn] = ldg;

	if (np->parent->plat_type == PLAT_TYPE_NIU) {
		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
		 * the firmware, and we're not supposed to change them.
		 * Validate the mapping, because if it's wrong we probably
		 * won't get any interrupts and that's painful to debug.
		 */
		if (nr64(LDG_NUM(ldn)) != ldg) {
			dev_err(np->device, PFX "Port %u, mis-matched "
				"for ldn %d, should be %d is %llu\n",
				np->port, ldn, ldg,
				(unsigned long long) nr64(LDG_NUM(ldn)));
			return -EINVAL;
		}
	} else
		nw64(LDG_NUM(ldn), ldg);

	return 0;
}

static int niu_set_ldg_timer_res(struct niu *np, int res)
{
	if (res < 0 || res > LDG_TIMER_RES_VAL)
		return -EINVAL;

	nw64(LDG_TIMER_RES, res);

	return 0;
}

static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
{
	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
	    (func < 0 || func > 3) ||
	    (vector < 0 || vector > 0x1f))
		return -EINVAL;

	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);

	return 0;
}
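/* Read one byte from the on-board EEPROM through the ESPC PIO
 * interface: write a read-start frame, poll ESPC_PIO_STAT for the
 * READ_END bit, and extract the data field.  The read sequence is
 * performed twice before the data byte is taken.
 */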
static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
{
	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));

	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
		return -EINVAL;

	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	do {
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -ENODEV;
	}

	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	do {
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -ENODEV;
	}

	frame = nr64(ESPC_PIO_STAT);
	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
}
8083 static int __devinit
niu_pci_eeprom_read16(struct niu
*np
, u32 off
)
8085 int err
= niu_pci_eeprom_read(np
, off
);
8091 err
= niu_pci_eeprom_read(np
, off
+ 1);
8094 val
|= (err
& 0xff);
8099 static int __devinit
niu_pci_eeprom_read16_swp(struct niu
*np
, u32 off
)
8101 int err
= niu_pci_eeprom_read(np
, off
);
8108 err
= niu_pci_eeprom_read(np
, off
+ 1);
8112 val
|= (err
& 0xff) << 8;
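/* Worked example (illustrative, not part of the driver logic): if the
 * EEPROM holds 0x12 at offset N and 0x34 at offset N + 1, then
 * niu_pci_eeprom_read16() returns 0x1234 (first byte becomes the high
 * byte), while niu_pci_eeprom_read16_swp() returns 0x3412 (byte-swapped).
 * The _swp variant is what the VPD/expansion-ROM scanners below use for
 * little-endian length fields.
 */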
static int __devinit niu_pci_vpd_get_propname(struct niu *np,
					      u32 off, char *namebuf,
					      int namebuf_len)
{
	int i;

	for (i = 0; i < namebuf_len; i++) {
		int err = niu_pci_eeprom_read(np, off + i);
		if (err < 0)
			return err;
		*namebuf++ = err;
		if (!err)
			break;
	}
	if (i >= namebuf_len)
		return -EINVAL;

	return i + 1;
}
static void __devinit niu_vpd_parse_version(struct niu *np)
{
	struct niu_vpd *vpd = &np->vpd;
	int len = strlen(vpd->version) + 1;
	const char *s = vpd->version;
	int i;

	for (i = 0; i < len - 5; i++) {
		if (!strncmp(s + i, "FCode ", 5))
			break;
	}
	if (i >= len - 5)
		return;

	s += i + 5;
	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);

	niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
	       vpd->fcode_major, vpd->fcode_minor);
	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
		np->flags |= NIU_FLAGS_VPD_VALID;
}
8163 /* ESPC_PIO_EN_ENABLE must be set */
8164 static int __devinit
niu_pci_vpd_scan_props(struct niu
*np
,
8167 unsigned int found_mask
= 0;
8168 #define FOUND_MASK_MODEL 0x00000001
8169 #define FOUND_MASK_BMODEL 0x00000002
8170 #define FOUND_MASK_VERS 0x00000004
8171 #define FOUND_MASK_MAC 0x00000008
8172 #define FOUND_MASK_NMAC 0x00000010
8173 #define FOUND_MASK_PHY 0x00000020
8174 #define FOUND_MASK_ALL 0x0000003f
8176 niudbg(PROBE
, "VPD_SCAN: start[%x] end[%x]\n",
8178 while (start
< end
) {
8179 int len
, err
, instance
, type
, prop_len
;
8184 if (found_mask
== FOUND_MASK_ALL
) {
8185 niu_vpd_parse_version(np
);
8189 err
= niu_pci_eeprom_read(np
, start
+ 2);
8195 instance
= niu_pci_eeprom_read(np
, start
);
8196 type
= niu_pci_eeprom_read(np
, start
+ 3);
8197 prop_len
= niu_pci_eeprom_read(np
, start
+ 4);
8198 err
= niu_pci_vpd_get_propname(np
, start
+ 5, namebuf
, 64);
8204 if (!strcmp(namebuf
, "model")) {
8205 prop_buf
= np
->vpd
.model
;
8206 max_len
= NIU_VPD_MODEL_MAX
;
8207 found_mask
|= FOUND_MASK_MODEL
;
8208 } else if (!strcmp(namebuf
, "board-model")) {
8209 prop_buf
= np
->vpd
.board_model
;
8210 max_len
= NIU_VPD_BD_MODEL_MAX
;
8211 found_mask
|= FOUND_MASK_BMODEL
;
8212 } else if (!strcmp(namebuf
, "version")) {
8213 prop_buf
= np
->vpd
.version
;
8214 max_len
= NIU_VPD_VERSION_MAX
;
8215 found_mask
|= FOUND_MASK_VERS
;
8216 } else if (!strcmp(namebuf
, "local-mac-address")) {
8217 prop_buf
= np
->vpd
.local_mac
;
8219 found_mask
|= FOUND_MASK_MAC
;
8220 } else if (!strcmp(namebuf
, "num-mac-addresses")) {
8221 prop_buf
= &np
->vpd
.mac_num
;
8223 found_mask
|= FOUND_MASK_NMAC
;
8224 } else if (!strcmp(namebuf
, "phy-type")) {
8225 prop_buf
= np
->vpd
.phy_type
;
8226 max_len
= NIU_VPD_PHY_TYPE_MAX
;
8227 found_mask
|= FOUND_MASK_PHY
;
8230 if (max_len
&& prop_len
> max_len
) {
8231 dev_err(np
->device
, PFX
"Property '%s' length (%d) is "
8232 "too long.\n", namebuf
, prop_len
);
8237 u32 off
= start
+ 5 + err
;
8240 niudbg(PROBE
, "VPD_SCAN: Reading in property [%s] "
8241 "len[%d]\n", namebuf
, prop_len
);
8242 for (i
= 0; i
< prop_len
; i
++)
8243 *prop_buf
++ = niu_pci_eeprom_read(np
, off
+ i
);
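/* Illustrative note on the found_mask bookkeeping above: once the "model",
 * "board-model", "version", "local-mac-address", "num-mac-addresses" and
 * "phy-type" properties have all been seen, found_mask == FOUND_MASK_ALL
 * (0x3f) and the scan stops early after parsing the FCode version string.
 */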
8252 /* ESPC_PIO_EN_ENABLE must be set */
8253 static void __devinit
niu_pci_vpd_fetch(struct niu
*np
, u32 start
)
8258 err
= niu_pci_eeprom_read16_swp(np
, start
+ 1);
8264 while (start
+ offset
< ESPC_EEPROM_SIZE
) {
8265 u32 here
= start
+ offset
;
8268 err
= niu_pci_eeprom_read(np
, here
);
8272 err
= niu_pci_eeprom_read16_swp(np
, here
+ 1);
8276 here
= start
+ offset
+ 3;
8277 end
= start
+ offset
+ err
;
8281 err
= niu_pci_vpd_scan_props(np
, here
, end
);
8282 if (err
< 0 || err
== 1)
8287 /* ESPC_PIO_EN_ENABLE must be set */
8288 static u32 __devinit
niu_pci_vpd_offset(struct niu
*np
)
8290 u32 start
= 0, end
= ESPC_EEPROM_SIZE
, ret
;
8293 while (start
< end
) {
8296 /* ROM header signature? */
8297 err
= niu_pci_eeprom_read16(np
, start
+ 0);
8301 /* Apply offset to PCI data structure. */
8302 err
= niu_pci_eeprom_read16(np
, start
+ 23);
8307 /* Check for "PCIR" signature. */
8308 err
= niu_pci_eeprom_read16(np
, start
+ 0);
8311 err
= niu_pci_eeprom_read16(np
, start
+ 2);
8315 /* Check for OBP image type. */
8316 err
= niu_pci_eeprom_read(np
, start
+ 20);
8320 err
= niu_pci_eeprom_read(np
, ret
+ 2);
8324 start
= ret
+ (err
* 512);
8328 err
= niu_pci_eeprom_read16_swp(np
, start
+ 8);
8333 err
= niu_pci_eeprom_read(np
, ret
+ 0);
static int __devinit niu_phy_type_prop_decode(struct niu *np,
					      const char *phy_prop)
{
	if (!strcmp(phy_prop, "mif")) {
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
	} else if (!strcmp(phy_prop, "xgf")) {
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "pcs")) {
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
	} else if (!strcmp(phy_prop, "xgc")) {
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
		/* 10G Serdes or 1G Serdes, default to 10G */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else {
		return -EINVAL;
	}
	return 0;
}
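/* Summary of the phy-type decoding above (for reference, derived from the
 * branches of niu_phy_type_prop_decode()):
 *
 *	"mif"		1G copper,  MAC_XCVR_MII
 *	"xgf"		10G fiber,  MAC_XCVR_XPCS
 *	"pcs"		1G fiber,   MAC_XCVR_PCS
 *	"xgc"		10G copper, MAC_XCVR_XPCS
 *	"xgsd"/"gsd"	serdes (defaults to 10G), MAC_XCVR_XPCS,
 *			NIU_FLAGS_XCVR_SERDES set
 *
 * Any other string is rejected with -EINVAL.
 */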
static int niu_pci_vpd_get_nports(struct niu *np)
{
	int ports = 0;

	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
		ports = 4;
	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
		ports = 2;
	}

	return ports;
}
8398 static void __devinit
niu_pci_vpd_validate(struct niu
*np
)
8400 struct net_device
*dev
= np
->dev
;
8401 struct niu_vpd
*vpd
= &np
->vpd
;
8404 if (!is_valid_ether_addr(&vpd
->local_mac
[0])) {
8405 dev_err(np
->device
, PFX
"VPD MAC invalid, "
8406 "falling back to SPROM.\n");
8408 np
->flags
&= ~NIU_FLAGS_VPD_VALID
;
8412 if (!strcmp(np
->vpd
.model
, NIU_ALONSO_MDL_STR
) ||
8413 !strcmp(np
->vpd
.model
, NIU_KIMI_MDL_STR
)) {
8414 np
->flags
|= NIU_FLAGS_10G
;
8415 np
->flags
&= ~NIU_FLAGS_FIBER
;
8416 np
->flags
|= NIU_FLAGS_XCVR_SERDES
;
8417 np
->mac_xcvr
= MAC_XCVR_PCS
;
8419 np
->flags
|= NIU_FLAGS_FIBER
;
8420 np
->flags
&= ~NIU_FLAGS_10G
;
8422 if (np
->flags
& NIU_FLAGS_10G
)
8423 np
->mac_xcvr
= MAC_XCVR_XPCS
;
8424 } else if (!strcmp(np
->vpd
.model
, NIU_FOXXY_MDL_STR
)) {
8425 np
->flags
|= (NIU_FLAGS_10G
| NIU_FLAGS_FIBER
|
8426 NIU_FLAGS_HOTPLUG_PHY
);
8427 } else if (niu_phy_type_prop_decode(np
, np
->vpd
.phy_type
)) {
8428 dev_err(np
->device
, PFX
"Illegal phy string [%s].\n",
8430 dev_err(np
->device
, PFX
"Falling back to SPROM.\n");
8431 np
->flags
&= ~NIU_FLAGS_VPD_VALID
;
8435 memcpy(dev
->perm_addr
, vpd
->local_mac
, ETH_ALEN
);
8437 val8
= dev
->perm_addr
[5];
8438 dev
->perm_addr
[5] += np
->port
;
8439 if (dev
->perm_addr
[5] < val8
)
8440 dev
->perm_addr
[4]++;
8442 memcpy(dev
->dev_addr
, dev
->perm_addr
, dev
->addr_len
);
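/* Worked example of the per-port MAC derivation above (illustrative): a
 * base address whose last byte is 0xfe on port 3 gives perm_addr[5] = 0x01
 * after the addition wraps, so the carry check bumps perm_addr[4] by one.
 */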
8445 static int __devinit
niu_pci_probe_sprom(struct niu
*np
)
8447 struct net_device
*dev
= np
->dev
;
8452 val
= (nr64(ESPC_VER_IMGSZ
) & ESPC_VER_IMGSZ_IMGSZ
);
8453 val
>>= ESPC_VER_IMGSZ_IMGSZ_SHIFT
;
8456 np
->eeprom_len
= len
;
8458 niudbg(PROBE
, "SPROM: Image size %llu\n", (unsigned long long) val
);
8461 for (i
= 0; i
< len
; i
++) {
8462 val
= nr64(ESPC_NCR(i
));
8463 sum
+= (val
>> 0) & 0xff;
8464 sum
+= (val
>> 8) & 0xff;
8465 sum
+= (val
>> 16) & 0xff;
8466 sum
+= (val
>> 24) & 0xff;
8468 niudbg(PROBE
, "SPROM: Checksum %x\n", (int)(sum
& 0xff));
8469 if ((sum
& 0xff) != 0xab) {
8470 dev_err(np
->device
, PFX
"Bad SPROM checksum "
8471 "(%x, should be 0xab)\n", (int) (sum
& 0xff));
8475 val
= nr64(ESPC_PHY_TYPE
);
8478 val8
= (val
& ESPC_PHY_TYPE_PORT0
) >>
8479 ESPC_PHY_TYPE_PORT0_SHIFT
;
8482 val8
= (val
& ESPC_PHY_TYPE_PORT1
) >>
8483 ESPC_PHY_TYPE_PORT1_SHIFT
;
8486 val8
= (val
& ESPC_PHY_TYPE_PORT2
) >>
8487 ESPC_PHY_TYPE_PORT2_SHIFT
;
8490 val8
= (val
& ESPC_PHY_TYPE_PORT3
) >>
8491 ESPC_PHY_TYPE_PORT3_SHIFT
;
8494 dev_err(np
->device
, PFX
"Bogus port number %u\n",
8498 niudbg(PROBE
, "SPROM: PHY type %x\n", val8
);
8501 case ESPC_PHY_TYPE_1G_COPPER
:
8502 /* 1G copper, MII */
8503 np
->flags
&= ~(NIU_FLAGS_FIBER
|
8505 np
->mac_xcvr
= MAC_XCVR_MII
;
8508 case ESPC_PHY_TYPE_1G_FIBER
:
8510 np
->flags
&= ~NIU_FLAGS_10G
;
8511 np
->flags
|= NIU_FLAGS_FIBER
;
8512 np
->mac_xcvr
= MAC_XCVR_PCS
;
8515 case ESPC_PHY_TYPE_10G_COPPER
:
8516 /* 10G copper, XPCS */
8517 np
->flags
|= NIU_FLAGS_10G
;
8518 np
->flags
&= ~NIU_FLAGS_FIBER
;
8519 np
->mac_xcvr
= MAC_XCVR_XPCS
;
8522 case ESPC_PHY_TYPE_10G_FIBER
:
8523 /* 10G fiber, XPCS */
8524 np
->flags
|= (NIU_FLAGS_10G
|
8526 np
->mac_xcvr
= MAC_XCVR_XPCS
;
8530 dev_err(np
->device
, PFX
"Bogus SPROM phy type %u\n", val8
);
8534 val
= nr64(ESPC_MAC_ADDR0
);
8535 niudbg(PROBE
, "SPROM: MAC_ADDR0[%08llx]\n",
8536 (unsigned long long) val
);
8537 dev
->perm_addr
[0] = (val
>> 0) & 0xff;
8538 dev
->perm_addr
[1] = (val
>> 8) & 0xff;
8539 dev
->perm_addr
[2] = (val
>> 16) & 0xff;
8540 dev
->perm_addr
[3] = (val
>> 24) & 0xff;
8542 val
= nr64(ESPC_MAC_ADDR1
);
8543 niudbg(PROBE
, "SPROM: MAC_ADDR1[%08llx]\n",
8544 (unsigned long long) val
);
8545 dev
->perm_addr
[4] = (val
>> 0) & 0xff;
8546 dev
->perm_addr
[5] = (val
>> 8) & 0xff;
8548 if (!is_valid_ether_addr(&dev
->perm_addr
[0])) {
8549 dev_err(np
->device
, PFX
"SPROM MAC address invalid\n");
8550 dev_err(np
->device
, PFX
"[ \n");
8551 for (i
= 0; i
< 6; i
++)
8552 printk("%02x ", dev
->perm_addr
[i
]);
8557 val8
= dev
->perm_addr
[5];
8558 dev
->perm_addr
[5] += np
->port
;
8559 if (dev
->perm_addr
[5] < val8
)
8560 dev
->perm_addr
[4]++;
8562 memcpy(dev
->dev_addr
, dev
->perm_addr
, dev
->addr_len
);
8564 val
= nr64(ESPC_MOD_STR_LEN
);
8565 niudbg(PROBE
, "SPROM: MOD_STR_LEN[%llu]\n",
8566 (unsigned long long) val
);
8570 for (i
= 0; i
< val
; i
+= 4) {
8571 u64 tmp
= nr64(ESPC_NCR(5 + (i
/ 4)));
8573 np
->vpd
.model
[i
+ 3] = (tmp
>> 0) & 0xff;
8574 np
->vpd
.model
[i
+ 2] = (tmp
>> 8) & 0xff;
8575 np
->vpd
.model
[i
+ 1] = (tmp
>> 16) & 0xff;
8576 np
->vpd
.model
[i
+ 0] = (tmp
>> 24) & 0xff;
8578 np
->vpd
.model
[val
] = '\0';
8580 val
= nr64(ESPC_BD_MOD_STR_LEN
);
8581 niudbg(PROBE
, "SPROM: BD_MOD_STR_LEN[%llu]\n",
8582 (unsigned long long) val
);
8586 for (i
= 0; i
< val
; i
+= 4) {
8587 u64 tmp
= nr64(ESPC_NCR(14 + (i
/ 4)));
8589 np
->vpd
.board_model
[i
+ 3] = (tmp
>> 0) & 0xff;
8590 np
->vpd
.board_model
[i
+ 2] = (tmp
>> 8) & 0xff;
8591 np
->vpd
.board_model
[i
+ 1] = (tmp
>> 16) & 0xff;
8592 np
->vpd
.board_model
[i
+ 0] = (tmp
>> 24) & 0xff;
8594 np
->vpd
.board_model
[val
] = '\0';
8597 nr64(ESPC_NUM_PORTS_MACS
) & ESPC_NUM_PORTS_MACS_VAL
;
8598 niudbg(PROBE
, "SPROM: NUM_PORTS_MACS[%d]\n",
static int __devinit niu_get_and_validate_port(struct niu *np)
{
	struct niu_parent *parent = np->parent;

	if (np->port <= 1)
		np->flags |= NIU_FLAGS_XMAC;

	if (!parent->num_ports) {
		if (parent->plat_type == PLAT_TYPE_NIU) {
			parent->num_ports = 2;
		} else {
			parent->num_ports = niu_pci_vpd_get_nports(np);
			if (!parent->num_ports) {
				/* Fall back to SPROM as last resort.
				 * This will fail on most cards.
				 */
				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
					ESPC_NUM_PORTS_MACS_VAL;

				/* All of the current probing methods fail on
				 * Maramba on-board parts.
				 */
				if (!parent->num_ports)
					parent->num_ports = 4;
			}
		}
	}

	niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
	       np->port, parent->num_ports);
	if (np->port >= parent->num_ports)
		return -ENODEV;

	return 0;
}
8640 static int __devinit
phy_record(struct niu_parent
*parent
,
8641 struct phy_probe_info
*p
,
8642 int dev_id_1
, int dev_id_2
, u8 phy_port
,
8645 u32 id
= (dev_id_1
<< 16) | dev_id_2
;
8648 if (dev_id_1
< 0 || dev_id_2
< 0)
8650 if (type
== PHY_TYPE_PMA_PMD
|| type
== PHY_TYPE_PCS
) {
8651 if (((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_BCM8704
) &&
8652 ((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_MRVL88X2011
) &&
8653 ((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_BCM8706
))
8656 if ((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_BCM5464R
)
8660 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
8662 (type
== PHY_TYPE_PMA_PMD
?
8664 (type
== PHY_TYPE_PCS
?
8668 if (p
->cur
[type
] >= NIU_MAX_PORTS
) {
8669 printk(KERN_ERR PFX
"Too many PHY ports.\n");
8673 p
->phy_id
[type
][idx
] = id
;
8674 p
->phy_port
[type
][idx
] = phy_port
;
8675 p
->cur
[type
] = idx
+ 1;
8679 static int __devinit
port_has_10g(struct phy_probe_info
*p
, int port
)
8683 for (i
= 0; i
< p
->cur
[PHY_TYPE_PMA_PMD
]; i
++) {
8684 if (p
->phy_port
[PHY_TYPE_PMA_PMD
][i
] == port
)
8687 for (i
= 0; i
< p
->cur
[PHY_TYPE_PCS
]; i
++) {
8688 if (p
->phy_port
[PHY_TYPE_PCS
][i
] == port
)
8695 static int __devinit
count_10g_ports(struct phy_probe_info
*p
, int *lowest
)
8701 for (port
= 8; port
< 32; port
++) {
8702 if (port_has_10g(p
, port
)) {
8712 static int __devinit
count_1g_ports(struct phy_probe_info
*p
, int *lowest
)
8715 if (p
->cur
[PHY_TYPE_MII
])
8716 *lowest
= p
->phy_port
[PHY_TYPE_MII
][0];
8718 return p
->cur
[PHY_TYPE_MII
];
static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
{
	int num_ports = parent->num_ports;
	int i;

	for (i = 0; i < num_ports; i++) {
		parent->rxchan_per_port[i] = (16 / num_ports);
		parent->txchan_per_port[i] = (16 / num_ports);

		pr_info(PFX "niu%d: Port %u [%u RX chans] "
			"[%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
	}
}
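/* Example of the even split above (illustrative): a 2-port N2 NIU gets
 * 16 / 2 = 8 RX and 8 TX DMA channels per port, while a 4-port
 * configuration gets 4 of each.
 */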
8738 static void __devinit
niu_divide_channels(struct niu_parent
*parent
,
8739 int num_10g
, int num_1g
)
8741 int num_ports
= parent
->num_ports
;
8742 int rx_chans_per_10g
, rx_chans_per_1g
;
8743 int tx_chans_per_10g
, tx_chans_per_1g
;
8744 int i
, tot_rx
, tot_tx
;
8746 if (!num_10g
|| !num_1g
) {
8747 rx_chans_per_10g
= rx_chans_per_1g
=
8748 (NIU_NUM_RXCHAN
/ num_ports
);
8749 tx_chans_per_10g
= tx_chans_per_1g
=
8750 (NIU_NUM_TXCHAN
/ num_ports
);
8752 rx_chans_per_1g
= NIU_NUM_RXCHAN
/ 8;
8753 rx_chans_per_10g
= (NIU_NUM_RXCHAN
-
8754 (rx_chans_per_1g
* num_1g
)) /
8757 tx_chans_per_1g
= NIU_NUM_TXCHAN
/ 6;
8758 tx_chans_per_10g
= (NIU_NUM_TXCHAN
-
8759 (tx_chans_per_1g
* num_1g
)) /
8763 tot_rx
= tot_tx
= 0;
8764 for (i
= 0; i
< num_ports
; i
++) {
8765 int type
= phy_decode(parent
->port_phy
, i
);
8767 if (type
== PORT_TYPE_10G
) {
8768 parent
->rxchan_per_port
[i
] = rx_chans_per_10g
;
8769 parent
->txchan_per_port
[i
] = tx_chans_per_10g
;
8771 parent
->rxchan_per_port
[i
] = rx_chans_per_1g
;
8772 parent
->txchan_per_port
[i
] = tx_chans_per_1g
;
8774 pr_info(PFX
"niu%d: Port %u [%u RX chans] "
8777 parent
->rxchan_per_port
[i
],
8778 parent
->txchan_per_port
[i
]);
8779 tot_rx
+= parent
->rxchan_per_port
[i
];
8780 tot_tx
+= parent
->txchan_per_port
[i
];
8783 if (tot_rx
> NIU_NUM_RXCHAN
) {
8784 printk(KERN_ERR PFX
"niu%d: Too many RX channels (%d), "
8785 "resetting to one per port.\n",
8786 parent
->index
, tot_rx
);
8787 for (i
= 0; i
< num_ports
; i
++)
8788 parent
->rxchan_per_port
[i
] = 1;
8790 if (tot_tx
> NIU_NUM_TXCHAN
) {
8791 printk(KERN_ERR PFX
"niu%d: Too many TX channels (%d), "
8792 "resetting to one per port.\n",
8793 parent
->index
, tot_tx
);
8794 for (i
= 0; i
< num_ports
; i
++)
8795 parent
->txchan_per_port
[i
] = 1;
8797 if (tot_rx
< NIU_NUM_RXCHAN
|| tot_tx
< NIU_NUM_TXCHAN
) {
8798 printk(KERN_WARNING PFX
"niu%d: Driver bug, wasted channels, "
8800 parent
->index
, tot_rx
, tot_tx
);
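/* Worked example for the mixed case above (illustrative; assumes the usual
 * NIU_NUM_RXCHAN == 16 and NIU_NUM_TXCHAN == 24): with two 10G and two 1G
 * ports, rx_chans_per_1g = 16 / 8 = 2 and rx_chans_per_10g =
 * (16 - 2 * 2) / 2 = 6, while tx_chans_per_1g = 24 / 6 = 4 and
 * tx_chans_per_10g = (24 - 4 * 2) / 2 = 8.  Every channel is consumed, so
 * neither the "Too many" nor the "wasted channels" message fires.
 */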
8804 static void __devinit
niu_divide_rdc_groups(struct niu_parent
*parent
,
8805 int num_10g
, int num_1g
)
8807 int i
, num_ports
= parent
->num_ports
;
8808 int rdc_group
, rdc_groups_per_port
;
8809 int rdc_channel_base
;
8812 rdc_groups_per_port
= NIU_NUM_RDC_TABLES
/ num_ports
;
8814 rdc_channel_base
= 0;
8816 for (i
= 0; i
< num_ports
; i
++) {
8817 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[i
];
8818 int grp
, num_channels
= parent
->rxchan_per_port
[i
];
8819 int this_channel_offset
;
8821 tp
->first_table_num
= rdc_group
;
8822 tp
->num_tables
= rdc_groups_per_port
;
8823 this_channel_offset
= 0;
8824 for (grp
= 0; grp
< tp
->num_tables
; grp
++) {
8825 struct rdc_table
*rt
= &tp
->tables
[grp
];
8828 pr_info(PFX
"niu%d: Port %d RDC tbl(%d) [ ",
8829 parent
->index
, i
, tp
->first_table_num
+ grp
);
8830 for (slot
= 0; slot
< NIU_RDC_TABLE_SLOTS
; slot
++) {
8831 rt
->rxdma_channel
[slot
] =
8832 rdc_channel_base
+ this_channel_offset
;
8834 printk("%d ", rt
->rxdma_channel
[slot
]);
8836 if (++this_channel_offset
== num_channels
)
8837 this_channel_offset
= 0;
8842 parent
->rdc_default
[i
] = rdc_channel_base
;
8844 rdc_channel_base
+= num_channels
;
8845 rdc_group
+= rdc_groups_per_port
;
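/* Illustrative sketch of the RDC fill above (assuming NIU_NUM_RDC_TABLES
 * == 16): with 4 ports each port owns 4 tables.  A port whose 2 RX
 * channels start at rdc_channel_base 4 has every slot of its tables filled
 * with the repeating pattern 4, 5, 4, 5, ... and its default RDC set to 4,
 * because this_channel_offset wraps when it reaches num_channels.
 */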
8849 static int __devinit
fill_phy_probe_info(struct niu
*np
,
8850 struct niu_parent
*parent
,
8851 struct phy_probe_info
*info
)
8853 unsigned long flags
;
8856 memset(info
, 0, sizeof(*info
));
8858 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */
8859 niu_lock_parent(np
, flags
);
8861 for (port
= 8; port
< 32; port
++) {
8862 int dev_id_1
, dev_id_2
;
8864 dev_id_1
= mdio_read(np
, port
,
8865 NIU_PMA_PMD_DEV_ADDR
, MII_PHYSID1
);
8866 dev_id_2
= mdio_read(np
, port
,
8867 NIU_PMA_PMD_DEV_ADDR
, MII_PHYSID2
);
8868 err
= phy_record(parent
, info
, dev_id_1
, dev_id_2
, port
,
8872 dev_id_1
= mdio_read(np
, port
,
8873 NIU_PCS_DEV_ADDR
, MII_PHYSID1
);
8874 dev_id_2
= mdio_read(np
, port
,
8875 NIU_PCS_DEV_ADDR
, MII_PHYSID2
);
8876 err
= phy_record(parent
, info
, dev_id_1
, dev_id_2
, port
,
8880 dev_id_1
= mii_read(np
, port
, MII_PHYSID1
);
8881 dev_id_2
= mii_read(np
, port
, MII_PHYSID2
);
8882 err
= phy_record(parent
, info
, dev_id_1
, dev_id_2
, port
,
8887 niu_unlock_parent(np
, flags
);
8892 static int __devinit
walk_phys(struct niu
*np
, struct niu_parent
*parent
)
8894 struct phy_probe_info
*info
= &parent
->phy_probe_info
;
8895 int lowest_10g
, lowest_1g
;
8896 int num_10g
, num_1g
;
8900 num_10g
= num_1g
= 0;
8902 if (!strcmp(np
->vpd
.model
, NIU_ALONSO_MDL_STR
) ||
8903 !strcmp(np
->vpd
.model
, NIU_KIMI_MDL_STR
)) {
8906 parent
->plat_type
= PLAT_TYPE_ATCA_CP3220
;
8907 parent
->num_ports
= 4;
8908 val
= (phy_encode(PORT_TYPE_1G
, 0) |
8909 phy_encode(PORT_TYPE_1G
, 1) |
8910 phy_encode(PORT_TYPE_1G
, 2) |
8911 phy_encode(PORT_TYPE_1G
, 3));
8912 } else if (!strcmp(np
->vpd
.model
, NIU_FOXXY_MDL_STR
)) {
8915 parent
->num_ports
= 2;
8916 val
= (phy_encode(PORT_TYPE_10G
, 0) |
8917 phy_encode(PORT_TYPE_10G
, 1));
8918 } else if ((np
->flags
& NIU_FLAGS_XCVR_SERDES
) &&
8919 (parent
->plat_type
== PLAT_TYPE_NIU
)) {
8920 /* this is the Monza case */
8921 if (np
->flags
& NIU_FLAGS_10G
) {
8922 val
= (phy_encode(PORT_TYPE_10G
, 0) |
8923 phy_encode(PORT_TYPE_10G
, 1));
8925 val
= (phy_encode(PORT_TYPE_1G
, 0) |
8926 phy_encode(PORT_TYPE_1G
, 1));
8929 err
= fill_phy_probe_info(np
, parent
, info
);
8933 num_10g
= count_10g_ports(info
, &lowest_10g
);
8934 num_1g
= count_1g_ports(info
, &lowest_1g
);
8936 switch ((num_10g
<< 4) | num_1g
) {
8938 if (lowest_1g
== 10)
8939 parent
->plat_type
= PLAT_TYPE_VF_P0
;
8940 else if (lowest_1g
== 26)
8941 parent
->plat_type
= PLAT_TYPE_VF_P1
;
8943 goto unknown_vg_1g_port
;
8947 val
= (phy_encode(PORT_TYPE_10G
, 0) |
8948 phy_encode(PORT_TYPE_10G
, 1) |
8949 phy_encode(PORT_TYPE_1G
, 2) |
8950 phy_encode(PORT_TYPE_1G
, 3));
8954 val
= (phy_encode(PORT_TYPE_10G
, 0) |
8955 phy_encode(PORT_TYPE_10G
, 1));
8959 val
= phy_encode(PORT_TYPE_10G
, np
->port
);
8963 if (lowest_1g
== 10)
8964 parent
->plat_type
= PLAT_TYPE_VF_P0
;
8965 else if (lowest_1g
== 26)
8966 parent
->plat_type
= PLAT_TYPE_VF_P1
;
8968 goto unknown_vg_1g_port
;
8972 if ((lowest_10g
& 0x7) == 0)
8973 val
= (phy_encode(PORT_TYPE_10G
, 0) |
8974 phy_encode(PORT_TYPE_1G
, 1) |
8975 phy_encode(PORT_TYPE_1G
, 2) |
8976 phy_encode(PORT_TYPE_1G
, 3));
8978 val
= (phy_encode(PORT_TYPE_1G
, 0) |
8979 phy_encode(PORT_TYPE_10G
, 1) |
8980 phy_encode(PORT_TYPE_1G
, 2) |
8981 phy_encode(PORT_TYPE_1G
, 3));
8985 if (lowest_1g
== 10)
8986 parent
->plat_type
= PLAT_TYPE_VF_P0
;
8987 else if (lowest_1g
== 26)
8988 parent
->plat_type
= PLAT_TYPE_VF_P1
;
8990 goto unknown_vg_1g_port
;
8992 val
= (phy_encode(PORT_TYPE_1G
, 0) |
8993 phy_encode(PORT_TYPE_1G
, 1) |
8994 phy_encode(PORT_TYPE_1G
, 2) |
8995 phy_encode(PORT_TYPE_1G
, 3));
8999 printk(KERN_ERR PFX
"Unsupported port config "
9006 parent
->port_phy
= val
;
9008 if (parent
->plat_type
== PLAT_TYPE_NIU
)
9009 niu_n2_divide_channels(parent
);
9011 niu_divide_channels(parent
, num_10g
, num_1g
);
9013 niu_divide_rdc_groups(parent
, num_10g
, num_1g
);
9018 printk(KERN_ERR PFX
"Cannot identify platform type, 1gport=%d\n",
static int __devinit niu_probe_ports(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, i;

	niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
	       parent->port_phy);

	if (parent->port_phy == PORT_PHY_UNKNOWN) {
		err = walk_phys(np, parent);
		if (err)
			return err;

		niu_set_ldg_timer_res(np, 2);
		for (i = 0; i <= LDN_MAX; i++)
			niu_ldn_irq_enable(np, i, 0);
	}

	if (parent->port_phy == PORT_PHY_INVALID)
		return -EINVAL;

	return 0;
}
static int __devinit niu_classifier_swstate_init(struct niu *np)
{
	struct niu_classifier *cp = &np->clas;

	niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
	       np->parent->tcam_num_entries);

	cp->tcam_top = (u16) np->port;
	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
	cp->h1_init = 0xffffffff;
	cp->h2_init = 0xffff;

	return fflp_early_init(np);
}
static void __devinit niu_link_config_init(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = (ADVERTISED_10baseT_Half |
			   ADVERTISED_10baseT_Full |
			   ADVERTISED_100baseT_Half |
			   ADVERTISED_100baseT_Full |
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full |
			   ADVERTISED_10000baseT_Full |
			   ADVERTISED_Autoneg);
	lp->speed = lp->active_speed = SPEED_INVALID;
	lp->duplex = DUPLEX_FULL;
	lp->active_duplex = DUPLEX_INVALID;
#if 0
	lp->loopback_mode = LOOPBACK_MAC;
	lp->active_speed = SPEED_10000;
	lp->active_duplex = DUPLEX_FULL;
#else
	lp->loopback_mode = LOOPBACK_DISABLED;
#endif
}
static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
{
	switch (np->port) {
	case 0:
		np->mac_regs = np->regs + XMAC_PORT0_OFF;
		np->ipp_off  = 0x00000;
		np->pcs_off  = 0x04000;
		np->xpcs_off = 0x02000;
		break;

	case 1:
		np->mac_regs = np->regs + XMAC_PORT1_OFF;
		np->ipp_off  = 0x08000;
		np->pcs_off  = 0x0a000;
		np->xpcs_off = 0x08000;
		break;

	case 2:
		np->mac_regs = np->regs + BMAC_PORT2_OFF;
		np->ipp_off  = 0x04000;
		np->pcs_off  = 0x0e000;
		np->xpcs_off = ~0UL;
		break;

	case 3:
		np->mac_regs = np->regs + BMAC_PORT3_OFF;
		np->ipp_off  = 0x0c000;
		np->pcs_off  = 0x12000;
		np->xpcs_off = ~0UL;
		break;

	default:
		dev_err(np->device, PFX "Port %u is invalid, cannot "
			"compute MAC block offset.\n", np->port);
		return -EINVAL;
	}

	return 0;
}
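/* Example of the offsets above (illustrative): port 1 reaches its MAC
 * registers through nr64_mac()/nw64_mac() at np->regs + XMAC_PORT1_OFF,
 * its IPP block at np->regs + 0x08000 and its PCS block at
 * np->regs + 0x0a000.  The BMAC ports 2 and 3 have no XPCS block, hence
 * xpcs_off == ~0UL there.
 */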
9127 static void __devinit
niu_try_msix(struct niu
*np
, u8
*ldg_num_map
)
9129 struct msix_entry msi_vec
[NIU_NUM_LDG
];
9130 struct niu_parent
*parent
= np
->parent
;
9131 struct pci_dev
*pdev
= np
->pdev
;
9132 int i
, num_irqs
, err
;
9135 first_ldg
= (NIU_NUM_LDG
/ parent
->num_ports
) * np
->port
;
9136 for (i
= 0; i
< (NIU_NUM_LDG
/ parent
->num_ports
); i
++)
9137 ldg_num_map
[i
] = first_ldg
+ i
;
9139 num_irqs
= (parent
->rxchan_per_port
[np
->port
] +
9140 parent
->txchan_per_port
[np
->port
] +
9141 (np
->port
== 0 ? 3 : 1));
9142 BUG_ON(num_irqs
> (NIU_NUM_LDG
/ parent
->num_ports
));
9145 for (i
= 0; i
< num_irqs
; i
++) {
9146 msi_vec
[i
].vector
= 0;
9147 msi_vec
[i
].entry
= i
;
9150 err
= pci_enable_msix(pdev
, msi_vec
, num_irqs
);
9152 np
->flags
&= ~NIU_FLAGS_MSIX
;
9160 np
->flags
|= NIU_FLAGS_MSIX
;
9161 for (i
= 0; i
< num_irqs
; i
++)
9162 np
->ldg
[i
].irq
= msi_vec
[i
].vector
;
9163 np
->num_ldg
= num_irqs
;
9166 static int __devinit
niu_n2_irq_init(struct niu
*np
, u8
*ldg_num_map
)
9168 #ifdef CONFIG_SPARC64
9169 struct of_device
*op
= np
->op
;
9170 const u32
*int_prop
;
9173 int_prop
= of_get_property(op
->node
, "interrupts", NULL
);
9177 for (i
= 0; i
< op
->num_irqs
; i
++) {
9178 ldg_num_map
[i
] = int_prop
[i
];
9179 np
->ldg
[i
].irq
= op
->irqs
[i
];
9182 np
->num_ldg
= op
->num_irqs
;
9190 static int __devinit
niu_ldg_init(struct niu
*np
)
9192 struct niu_parent
*parent
= np
->parent
;
9193 u8 ldg_num_map
[NIU_NUM_LDG
];
9194 int first_chan
, num_chan
;
9195 int i
, err
, ldg_rotor
;
9199 np
->ldg
[0].irq
= np
->dev
->irq
;
9200 if (parent
->plat_type
== PLAT_TYPE_NIU
) {
9201 err
= niu_n2_irq_init(np
, ldg_num_map
);
9205 niu_try_msix(np
, ldg_num_map
);
9208 for (i
= 0; i
< np
->num_ldg
; i
++) {
9209 struct niu_ldg
*lp
= &np
->ldg
[i
];
9211 netif_napi_add(np
->dev
, &lp
->napi
, niu_poll
, 64);
9214 lp
->ldg_num
= ldg_num_map
[i
];
9215 lp
->timer
= 2; /* XXX */
9217 /* On N2 NIU the firmware has setup the SID mappings so they go
9218 * to the correct values that will route the LDG to the proper
9219 * interrupt in the NCU interrupt table.
9221 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
9222 err
= niu_set_ldg_sid(np
, lp
->ldg_num
, port
, i
);
	/* We adopt the LDG assignment ordering used by the N2 NIU
	 * 'interrupt' properties because that simplifies a lot of
	 * things.  This ordering is:
	 *
	 *	MAC
	 *	MIF	(if port zero)
	 *	SYSERR	(if port zero)
	 *	RX channels
	 *	TX channels
	 */
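	/* Illustrative consequence of the round-robin below: when the number
	 * of LDNs to place exceeds np->num_ldg, several interrupt sources end
	 * up sharing one logical device group, because ldg_rotor wraps back
	 * to zero each time it reaches np->num_ldg.
	 */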
9241 err
= niu_ldg_assign_ldn(np
, parent
, ldg_num_map
[ldg_rotor
],
9247 if (ldg_rotor
== np
->num_ldg
)
9251 err
= niu_ldg_assign_ldn(np
, parent
,
9252 ldg_num_map
[ldg_rotor
],
9258 if (ldg_rotor
== np
->num_ldg
)
9261 err
= niu_ldg_assign_ldn(np
, parent
,
9262 ldg_num_map
[ldg_rotor
],
9268 if (ldg_rotor
== np
->num_ldg
)
9274 for (i
= 0; i
< port
; i
++)
9275 first_chan
+= parent
->rxchan_per_port
[port
];
9276 num_chan
= parent
->rxchan_per_port
[port
];
9278 for (i
= first_chan
; i
< (first_chan
+ num_chan
); i
++) {
9279 err
= niu_ldg_assign_ldn(np
, parent
,
9280 ldg_num_map
[ldg_rotor
],
9285 if (ldg_rotor
== np
->num_ldg
)
9290 for (i
= 0; i
< port
; i
++)
9291 first_chan
+= parent
->txchan_per_port
[port
];
9292 num_chan
= parent
->txchan_per_port
[port
];
9293 for (i
= first_chan
; i
< (first_chan
+ num_chan
); i
++) {
9294 err
= niu_ldg_assign_ldn(np
, parent
,
9295 ldg_num_map
[ldg_rotor
],
9300 if (ldg_rotor
== np
->num_ldg
)
static void __devexit niu_ldg_free(struct niu *np)
{
	if (np->flags & NIU_FLAGS_MSIX)
		pci_disable_msix(np->pdev);
}
9313 static int __devinit
niu_get_of_props(struct niu
*np
)
9315 #ifdef CONFIG_SPARC64
9316 struct net_device
*dev
= np
->dev
;
9317 struct device_node
*dp
;
9318 const char *phy_type
;
9323 if (np
->parent
->plat_type
== PLAT_TYPE_NIU
)
9326 dp
= pci_device_to_OF_node(np
->pdev
);
9328 phy_type
= of_get_property(dp
, "phy-type", &prop_len
);
9330 dev_err(np
->device
, PFX
"%s: OF node lacks "
9331 "phy-type property\n",
9336 if (!strcmp(phy_type
, "none"))
9339 strcpy(np
->vpd
.phy_type
, phy_type
);
9341 if (niu_phy_type_prop_decode(np
, np
->vpd
.phy_type
)) {
9342 dev_err(np
->device
, PFX
"%s: Illegal phy string [%s].\n",
9343 dp
->full_name
, np
->vpd
.phy_type
);
9347 mac_addr
= of_get_property(dp
, "local-mac-address", &prop_len
);
9349 dev_err(np
->device
, PFX
"%s: OF node lacks "
9350 "local-mac-address property\n",
9354 if (prop_len
!= dev
->addr_len
) {
9355 dev_err(np
->device
, PFX
"%s: OF MAC address prop len (%d) "
9357 dp
->full_name
, prop_len
);
9359 memcpy(dev
->perm_addr
, mac_addr
, dev
->addr_len
);
9360 if (!is_valid_ether_addr(&dev
->perm_addr
[0])) {
9363 dev_err(np
->device
, PFX
"%s: OF MAC address is invalid\n",
9365 dev_err(np
->device
, PFX
"%s: [ \n",
9367 for (i
= 0; i
< 6; i
++)
9368 printk("%02x ", dev
->perm_addr
[i
]);
9373 memcpy(dev
->dev_addr
, dev
->perm_addr
, dev
->addr_len
);
9375 model
= of_get_property(dp
, "model", &prop_len
);
9378 strcpy(np
->vpd
.model
, model
);
9380 if (of_find_property(dp
, "hot-swappable-phy", &prop_len
)) {
9381 np
->flags
|= (NIU_FLAGS_10G
| NIU_FLAGS_FIBER
|
9382 NIU_FLAGS_HOTPLUG_PHY
);
9391 static int __devinit
niu_get_invariants(struct niu
*np
)
9393 int err
, have_props
;
9396 err
= niu_get_of_props(np
);
9402 err
= niu_init_mac_ipp_pcs_base(np
);
9407 err
= niu_get_and_validate_port(np
);
9412 if (np
->parent
->plat_type
== PLAT_TYPE_NIU
)
9415 nw64(ESPC_PIO_EN
, ESPC_PIO_EN_ENABLE
);
9416 offset
= niu_pci_vpd_offset(np
);
9417 niudbg(PROBE
, "niu_get_invariants: VPD offset [%08x]\n",
9420 niu_pci_vpd_fetch(np
, offset
);
9421 nw64(ESPC_PIO_EN
, 0);
9423 if (np
->flags
& NIU_FLAGS_VPD_VALID
) {
9424 niu_pci_vpd_validate(np
);
9425 err
= niu_get_and_validate_port(np
);
9430 if (!(np
->flags
& NIU_FLAGS_VPD_VALID
)) {
9431 err
= niu_get_and_validate_port(np
);
9434 err
= niu_pci_probe_sprom(np
);
9440 err
= niu_probe_ports(np
);
9446 niu_classifier_swstate_init(np
);
9447 niu_link_config_init(np
);
9449 err
= niu_determine_phy_disposition(np
);
9451 err
= niu_init_link(np
);
9456 static LIST_HEAD(niu_parent_list
);
9457 static DEFINE_MUTEX(niu_parent_lock
);
9458 static int niu_parent_index
;
9460 static ssize_t
show_port_phy(struct device
*dev
,
9461 struct device_attribute
*attr
, char *buf
)
9463 struct platform_device
*plat_dev
= to_platform_device(dev
);
9464 struct niu_parent
*p
= plat_dev
->dev
.platform_data
;
9465 u32 port_phy
= p
->port_phy
;
9466 char *orig_buf
= buf
;
9469 if (port_phy
== PORT_PHY_UNKNOWN
||
9470 port_phy
== PORT_PHY_INVALID
)
9473 for (i
= 0; i
< p
->num_ports
; i
++) {
9474 const char *type_str
;
9477 type
= phy_decode(port_phy
, i
);
9478 if (type
== PORT_TYPE_10G
)
9483 (i
== 0) ? "%s" : " %s",
9486 buf
+= sprintf(buf
, "\n");
9487 return buf
- orig_buf
;
9490 static ssize_t
show_plat_type(struct device
*dev
,
9491 struct device_attribute
*attr
, char *buf
)
9493 struct platform_device
*plat_dev
= to_platform_device(dev
);
9494 struct niu_parent
*p
= plat_dev
->dev
.platform_data
;
9495 const char *type_str
;
9497 switch (p
->plat_type
) {
9498 case PLAT_TYPE_ATLAS
:
9504 case PLAT_TYPE_VF_P0
:
9507 case PLAT_TYPE_VF_P1
:
9511 type_str
= "unknown";
9515 return sprintf(buf
, "%s\n", type_str
);
static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf,
				    int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	char *orig_buf = buf;
	u8 *arr;
	int i;

	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}
static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 0);
}
static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;

	return sprintf(buf, "%d\n", p->num_ports);
}
static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
	{}
};
9570 static struct niu_parent
* __devinit
niu_new_parent(struct niu
*np
,
9571 union niu_parent_id
*id
,
9574 struct platform_device
*plat_dev
;
9575 struct niu_parent
*p
;
9578 niudbg(PROBE
, "niu_new_parent: Creating new parent.\n");
9580 plat_dev
= platform_device_register_simple("niu", niu_parent_index
,
9582 if (IS_ERR(plat_dev
))
9585 for (i
= 0; attr_name(niu_parent_attributes
[i
]); i
++) {
9586 int err
= device_create_file(&plat_dev
->dev
,
9587 &niu_parent_attributes
[i
]);
9589 goto fail_unregister
;
9592 p
= kzalloc(sizeof(*p
), GFP_KERNEL
);
9594 goto fail_unregister
;
9596 p
->index
= niu_parent_index
++;
9598 plat_dev
->dev
.platform_data
= p
;
9599 p
->plat_dev
= plat_dev
;
9601 memcpy(&p
->id
, id
, sizeof(*id
));
9602 p
->plat_type
= ptype
;
9603 INIT_LIST_HEAD(&p
->list
);
9604 atomic_set(&p
->refcnt
, 0);
9605 list_add(&p
->list
, &niu_parent_list
);
9606 spin_lock_init(&p
->lock
);
9608 p
->rxdma_clock_divider
= 7500;
9610 p
->tcam_num_entries
= NIU_PCI_TCAM_ENTRIES
;
9611 if (p
->plat_type
== PLAT_TYPE_NIU
)
9612 p
->tcam_num_entries
= NIU_NONPCI_TCAM_ENTRIES
;
9614 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_SCTP_IPV6
; i
++) {
9615 int index
= i
- CLASS_CODE_USER_PROG1
;
9617 p
->tcam_key
[index
] = TCAM_KEY_TSEL
;
9618 p
->flow_key
[index
] = (FLOW_KEY_IPSA
|
9621 (FLOW_KEY_L4_BYTE12
<<
9622 FLOW_KEY_L4_0_SHIFT
) |
9623 (FLOW_KEY_L4_BYTE12
<<
9624 FLOW_KEY_L4_1_SHIFT
));
9627 for (i
= 0; i
< LDN_MAX
+ 1; i
++)
9628 p
->ldg_map
[i
] = LDG_INVALID
;
9633 platform_device_unregister(plat_dev
);
9637 static struct niu_parent
* __devinit
niu_get_parent(struct niu
*np
,
9638 union niu_parent_id
*id
,
9641 struct niu_parent
*p
, *tmp
;
9642 int port
= np
->port
;
9644 niudbg(PROBE
, "niu_get_parent: platform_type[%u] port[%u]\n",
9647 mutex_lock(&niu_parent_lock
);
9649 list_for_each_entry(tmp
, &niu_parent_list
, list
) {
9650 if (!memcmp(id
, &tmp
->id
, sizeof(*id
))) {
9656 p
= niu_new_parent(np
, id
, ptype
);
9662 sprintf(port_name
, "port%d", port
);
9663 err
= sysfs_create_link(&p
->plat_dev
->dev
.kobj
,
9667 p
->ports
[port
] = np
;
9668 atomic_inc(&p
->refcnt
);
9671 mutex_unlock(&niu_parent_lock
);
9676 static void niu_put_parent(struct niu
*np
)
9678 struct niu_parent
*p
= np
->parent
;
9682 BUG_ON(!p
|| p
->ports
[port
] != np
);
9684 niudbg(PROBE
, "niu_put_parent: port[%u]\n", port
);
9686 sprintf(port_name
, "port%d", port
);
9688 mutex_lock(&niu_parent_lock
);
9690 sysfs_remove_link(&p
->plat_dev
->dev
.kobj
, port_name
);
9692 p
->ports
[port
] = NULL
;
9695 if (atomic_dec_and_test(&p
->refcnt
)) {
9697 platform_device_unregister(p
->plat_dev
);
9700 mutex_unlock(&niu_parent_lock
);
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
				    u64 *handle, gfp_t flag)
{
	dma_addr_t dh;
	void *ret;

	ret = dma_alloc_coherent(dev, size, &dh, flag);
	if (ret)
		*handle = dh;
	return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
				  void *cpu_addr, u64 handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
			       size_t size, enum dma_data_direction direction)
{
	dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
			      size_t size,
			      enum dma_data_direction direction)
{
	return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
				 size_t size,
				 enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
	.alloc_coherent	= niu_pci_alloc_coherent,
	.free_coherent	= niu_pci_free_coherent,
	.map_page	= niu_pci_map_page,
	.unmap_page	= niu_pci_unmap_page,
	.map_single	= niu_pci_map_single,
	.unmap_single	= niu_pci_unmap_single,
};
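/* The niu_ops indirection above lets the same datapath run over either the
 * generic DMA API (this table) or, on the SPARC N2 NIU, raw physical
 * addresses (see niu_phys_ops further down): callers simply go through
 * np->ops->map_page() / np->ops->map_single() without knowing which
 * backend is active.
 */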
static void __devinit niu_driver_version(void)
{
	static int niu_version_printed;

	if (niu_version_printed++ == 0)
		pr_info("%s", version);
}

static struct net_device * __devinit niu_alloc_and_init(
	struct device *gen_dev, struct pci_dev *pdev,
	struct of_device *op, const struct niu_ops *ops,
	u8 port)
{
	struct net_device *dev;
	struct niu *np;

	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
	if (!dev) {
		dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, gen_dev);

	np = netdev_priv(dev);
	np->dev = dev;
	np->pdev = pdev;
	np->op = op;
	np->device = gen_dev;
	np->ops = ops;

	np->msg_enable = niu_debug;

	spin_lock_init(&np->lock);
	INIT_WORK(&np->reset_task, niu_reset_task);

	np->port = port;

	return dev;
}

static const struct net_device_ops niu_netdev_ops = {
	.ndo_open		= niu_open,
	.ndo_stop		= niu_close,
	.ndo_start_xmit		= niu_start_xmit,
	.ndo_get_stats		= niu_get_stats,
	.ndo_set_multicast_list	= niu_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= niu_set_mac_addr,
	.ndo_do_ioctl		= niu_ioctl,
	.ndo_tx_timeout		= niu_tx_timeout,
	.ndo_change_mtu		= niu_change_mtu,
};

static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &niu_netdev_ops;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
}
9818 static void __devinit
niu_device_announce(struct niu
*np
)
9820 struct net_device
*dev
= np
->dev
;
9822 pr_info("%s: NIU Ethernet %pM\n", dev
->name
, dev
->dev_addr
);
9824 if (np
->parent
->plat_type
== PLAT_TYPE_ATCA_CP3220
) {
9825 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9827 (np
->flags
& NIU_FLAGS_XMAC
? "XMAC" : "BMAC"),
9828 (np
->flags
& NIU_FLAGS_10G
? "10G" : "1G"),
9829 (np
->flags
& NIU_FLAGS_FIBER
? "RGMII FIBER" : "SERDES"),
9830 (np
->mac_xcvr
== MAC_XCVR_MII
? "MII" :
9831 (np
->mac_xcvr
== MAC_XCVR_PCS
? "PCS" : "XPCS")),
9834 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9836 (np
->flags
& NIU_FLAGS_XMAC
? "XMAC" : "BMAC"),
9837 (np
->flags
& NIU_FLAGS_10G
? "10G" : "1G"),
9838 (np
->flags
& NIU_FLAGS_FIBER
? "FIBER" :
9839 (np
->flags
& NIU_FLAGS_XCVR_SERDES
? "SERDES" :
9841 (np
->mac_xcvr
== MAC_XCVR_MII
? "MII" :
9842 (np
->mac_xcvr
== MAC_XCVR_PCS
? "PCS" : "XPCS")),
9847 static int __devinit
niu_pci_init_one(struct pci_dev
*pdev
,
9848 const struct pci_device_id
*ent
)
9850 union niu_parent_id parent_id
;
9851 struct net_device
*dev
;
9857 niu_driver_version();
9859 err
= pci_enable_device(pdev
);
9861 dev_err(&pdev
->dev
, PFX
"Cannot enable PCI device, "
9866 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
) ||
9867 !(pci_resource_flags(pdev
, 2) & IORESOURCE_MEM
)) {
9868 dev_err(&pdev
->dev
, PFX
"Cannot find proper PCI device "
9869 "base addresses, aborting.\n");
9871 goto err_out_disable_pdev
;
9874 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
9876 dev_err(&pdev
->dev
, PFX
"Cannot obtain PCI resources, "
9878 goto err_out_disable_pdev
;
9881 pos
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
9883 dev_err(&pdev
->dev
, PFX
"Cannot find PCI Express capability, "
9885 goto err_out_free_res
;
9888 dev
= niu_alloc_and_init(&pdev
->dev
, pdev
, NULL
,
9889 &niu_pci_ops
, PCI_FUNC(pdev
->devfn
));
9892 goto err_out_free_res
;
9894 np
= netdev_priv(dev
);
9896 memset(&parent_id
, 0, sizeof(parent_id
));
9897 parent_id
.pci
.domain
= pci_domain_nr(pdev
->bus
);
9898 parent_id
.pci
.bus
= pdev
->bus
->number
;
9899 parent_id
.pci
.device
= PCI_SLOT(pdev
->devfn
);
9901 np
->parent
= niu_get_parent(np
, &parent_id
,
9905 goto err_out_free_dev
;
9908 pci_read_config_word(pdev
, pos
+ PCI_EXP_DEVCTL
, &val16
);
9909 val16
&= ~PCI_EXP_DEVCTL_NOSNOOP_EN
;
9910 val16
|= (PCI_EXP_DEVCTL_CERE
|
9911 PCI_EXP_DEVCTL_NFERE
|
9912 PCI_EXP_DEVCTL_FERE
|
9913 PCI_EXP_DEVCTL_URRE
|
9914 PCI_EXP_DEVCTL_RELAX_EN
);
9915 pci_write_config_word(pdev
, pos
+ PCI_EXP_DEVCTL
, val16
);
9917 dma_mask
= DMA_44BIT_MASK
;
9918 err
= pci_set_dma_mask(pdev
, dma_mask
);
9920 dev
->features
|= NETIF_F_HIGHDMA
;
9921 err
= pci_set_consistent_dma_mask(pdev
, dma_mask
);
9923 dev_err(&pdev
->dev
, PFX
"Unable to obtain 44 bit "
9924 "DMA for consistent allocations, "
9926 goto err_out_release_parent
;
9929 if (err
|| dma_mask
== DMA_BIT_MASK(32)) {
9930 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
9932 dev_err(&pdev
->dev
, PFX
"No usable DMA configuration, "
9934 goto err_out_release_parent
;
9938 dev
->features
|= (NETIF_F_SG
| NETIF_F_HW_CSUM
);
9940 np
->regs
= pci_ioremap_bar(pdev
, 0);
9942 dev_err(&pdev
->dev
, PFX
"Cannot map device registers, "
9945 goto err_out_release_parent
;
9948 pci_set_master(pdev
);
9949 pci_save_state(pdev
);
9951 dev
->irq
= pdev
->irq
;
9953 niu_assign_netdev_ops(dev
);
9955 err
= niu_get_invariants(np
);
9958 dev_err(&pdev
->dev
, PFX
"Problem fetching invariants "
9959 "of chip, aborting.\n");
9960 goto err_out_iounmap
;
9963 err
= register_netdev(dev
);
9965 dev_err(&pdev
->dev
, PFX
"Cannot register net device, "
9967 goto err_out_iounmap
;
9970 pci_set_drvdata(pdev
, dev
);
9972 niu_device_announce(np
);
9982 err_out_release_parent
:
9989 pci_release_regions(pdev
);
9991 err_out_disable_pdev
:
9992 pci_disable_device(pdev
);
9993 pci_set_drvdata(pdev
, NULL
);
9998 static void __devexit
niu_pci_remove_one(struct pci_dev
*pdev
)
10000 struct net_device
*dev
= pci_get_drvdata(pdev
);
10003 struct niu
*np
= netdev_priv(dev
);
10005 unregister_netdev(dev
);
10013 niu_put_parent(np
);
10016 pci_release_regions(pdev
);
10017 pci_disable_device(pdev
);
10018 pci_set_drvdata(pdev
, NULL
);
10022 static int niu_suspend(struct pci_dev
*pdev
, pm_message_t state
)
10024 struct net_device
*dev
= pci_get_drvdata(pdev
);
10025 struct niu
*np
= netdev_priv(dev
);
10026 unsigned long flags
;
10028 if (!netif_running(dev
))
10031 flush_scheduled_work();
10032 niu_netif_stop(np
);
10034 del_timer_sync(&np
->timer
);
10036 spin_lock_irqsave(&np
->lock
, flags
);
10037 niu_enable_interrupts(np
, 0);
10038 spin_unlock_irqrestore(&np
->lock
, flags
);
10040 netif_device_detach(dev
);
10042 spin_lock_irqsave(&np
->lock
, flags
);
10044 spin_unlock_irqrestore(&np
->lock
, flags
);
10046 pci_save_state(pdev
);
10051 static int niu_resume(struct pci_dev
*pdev
)
10053 struct net_device
*dev
= pci_get_drvdata(pdev
);
10054 struct niu
*np
= netdev_priv(dev
);
10055 unsigned long flags
;
10058 if (!netif_running(dev
))
10061 pci_restore_state(pdev
);
10063 netif_device_attach(dev
);
10065 spin_lock_irqsave(&np
->lock
, flags
);
10067 err
= niu_init_hw(np
);
10069 np
->timer
.expires
= jiffies
+ HZ
;
10070 add_timer(&np
->timer
);
10071 niu_netif_start(np
);
10074 spin_unlock_irqrestore(&np
->lock
, flags
);
static struct pci_driver niu_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= niu_pci_tbl,
	.probe		= niu_pci_init_one,
	.remove		= __devexit_p(niu_pci_remove_one),
	.suspend	= niu_suspend,
	.resume		= niu_resume,
};
10088 #ifdef CONFIG_SPARC64
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
				     u64 *dma_addr, gfp_t flag)
{
	unsigned long order = get_order(size);
	unsigned long page = __get_free_pages(flag, order);

	if (page == 0UL)
		return NULL;
	memset((char *)page, 0, PAGE_SIZE << order);
	*dma_addr = __pa(page);

	return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
				   void *cpu_addr, u64 handle)
{
	unsigned long order = get_order(size);

	free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
				size_t size, enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
			       size_t size,
			       enum dma_data_direction direction)
{
	return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static const struct niu_ops niu_phys_ops = {
	.alloc_coherent	= niu_phys_alloc_coherent,
	.free_coherent	= niu_phys_free_coherent,
	.map_page	= niu_phys_map_page,
	.unmap_page	= niu_phys_unmap_page,
	.map_single	= niu_phys_map_single,
	.unmap_single	= niu_phys_unmap_single,
};

static unsigned long res_size(struct resource *r)
{
	return r->end - r->start + 1UL;
}
10152 static int __devinit
niu_of_probe(struct of_device
*op
,
10153 const struct of_device_id
*match
)
10155 union niu_parent_id parent_id
;
10156 struct net_device
*dev
;
10161 niu_driver_version();
10163 reg
= of_get_property(op
->node
, "reg", NULL
);
10165 dev_err(&op
->dev
, PFX
"%s: No 'reg' property, aborting.\n",
10166 op
->node
->full_name
);
10170 dev
= niu_alloc_and_init(&op
->dev
, NULL
, op
,
10171 &niu_phys_ops
, reg
[0] & 0x1);
10176 np
= netdev_priv(dev
);
10178 memset(&parent_id
, 0, sizeof(parent_id
));
10179 parent_id
.of
= of_get_parent(op
->node
);
10181 np
->parent
= niu_get_parent(np
, &parent_id
,
10185 goto err_out_free_dev
;
10188 dev
->features
|= (NETIF_F_SG
| NETIF_F_HW_CSUM
);
10190 np
->regs
= of_ioremap(&op
->resource
[1], 0,
10191 res_size(&op
->resource
[1]),
10194 dev_err(&op
->dev
, PFX
"Cannot map device registers, "
10197 goto err_out_release_parent
;
10200 np
->vir_regs_1
= of_ioremap(&op
->resource
[2], 0,
10201 res_size(&op
->resource
[2]),
10203 if (!np
->vir_regs_1
) {
10204 dev_err(&op
->dev
, PFX
"Cannot map device vir registers 1, "
10207 goto err_out_iounmap
;
10210 np
->vir_regs_2
= of_ioremap(&op
->resource
[3], 0,
10211 res_size(&op
->resource
[3]),
10213 if (!np
->vir_regs_2
) {
10214 dev_err(&op
->dev
, PFX
"Cannot map device vir registers 2, "
10217 goto err_out_iounmap
;
10220 niu_assign_netdev_ops(dev
);
10222 err
= niu_get_invariants(np
);
10224 if (err
!= -ENODEV
)
10225 dev_err(&op
->dev
, PFX
"Problem fetching invariants "
10226 "of chip, aborting.\n");
10227 goto err_out_iounmap
;
10230 err
= register_netdev(dev
);
10232 dev_err(&op
->dev
, PFX
"Cannot register net device, "
10234 goto err_out_iounmap
;
10237 dev_set_drvdata(&op
->dev
, dev
);
10239 niu_device_announce(np
);
10244 if (np
->vir_regs_1
) {
10245 of_iounmap(&op
->resource
[2], np
->vir_regs_1
,
10246 res_size(&op
->resource
[2]));
10247 np
->vir_regs_1
= NULL
;
10250 if (np
->vir_regs_2
) {
10251 of_iounmap(&op
->resource
[3], np
->vir_regs_2
,
10252 res_size(&op
->resource
[3]));
10253 np
->vir_regs_2
= NULL
;
10257 of_iounmap(&op
->resource
[1], np
->regs
,
10258 res_size(&op
->resource
[1]));
10262 err_out_release_parent
:
10263 niu_put_parent(np
);
10272 static int __devexit
niu_of_remove(struct of_device
*op
)
10274 struct net_device
*dev
= dev_get_drvdata(&op
->dev
);
10277 struct niu
*np
= netdev_priv(dev
);
10279 unregister_netdev(dev
);
10281 if (np
->vir_regs_1
) {
10282 of_iounmap(&op
->resource
[2], np
->vir_regs_1
,
10283 res_size(&op
->resource
[2]));
10284 np
->vir_regs_1
= NULL
;
10287 if (np
->vir_regs_2
) {
10288 of_iounmap(&op
->resource
[3], np
->vir_regs_2
,
10289 res_size(&op
->resource
[3]));
10290 np
->vir_regs_2
= NULL
;
10294 of_iounmap(&op
->resource
[1], np
->regs
,
10295 res_size(&op
->resource
[1]));
10301 niu_put_parent(np
);
10304 dev_set_drvdata(&op
->dev
, NULL
);
static const struct of_device_id niu_match[] = {
	{
		.name = "network",
		.compatible = "SUNW,niusl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct of_platform_driver niu_of_driver = {
	.name		= "niu",
	.match_table	= niu_match,
	.probe		= niu_of_probe,
	.remove		= __devexit_p(niu_of_remove),
};

#endif /* CONFIG_SPARC64 */
static int __init niu_init(void)
{
	int err = 0;

	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = of_register_driver(&niu_of_driver, &of_bus_type);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			of_unregister_driver(&niu_of_driver);
#endif
	}

	return err;
}

static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	of_unregister_driver(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);