/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/of_device.h>

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}

static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
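/*
 * Illustrative sketch, not part of the original driver: the NIU registers
 * are 64 bits wide but are accessed above as two 32-bit readl()/writel()
 * operations, low word first.  The standalone snippet below only mimics
 * that composition on an ordinary buffer; demo_* names are made up for
 * the example.
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* Compose a 64-bit value from two 32-bit halves, the way readq()
 * combines readl(reg) and readl(reg + 4UL).
 */
static uint64_t demo_readq(const uint32_t *reg)
{
	return (uint64_t)reg[0] | ((uint64_t)reg[1] << 32);
}

static void demo_writeq(uint64_t val, uint32_t *reg)
{
	reg[0] = (uint32_t)(val & 0xffffffff);	/* low word first */
	reg[1] = (uint32_t)(val >> 32);		/* then high word */
}

int main(void)
{
	uint32_t fake_reg[2] = { 0, 0 };

	demo_writeq(0x1122334455667788ULL, fake_reg);
	assert(demo_readq(fake_reg) == 0x1122334455667788ULL);
	return 0;
}
#endif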
#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
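/*
 * Illustrative sketch, not part of the original driver: the
 * __niu_set_and_wait_clear_*() helpers write a self-clearing bit (typically
 * a software-reset bit) and then poll until the hardware drops it, giving
 * up after a bounded number of iterations.  The standalone model below uses
 * a fake register and made-up names to show only the control flow.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_reg;

/* Poll until 'bits' clears in *reg or the retry budget runs out. */
static int wait_bits_clear(volatile uint64_t *reg, uint64_t bits, int limit)
{
	while (--limit >= 0) {
		if (!(*reg & bits))
			return 0;
		/* a real driver would udelay() between samples here */
		*reg &= ~bits;	/* pretend the hardware finished */
	}
	return -1;		/* timed out; the driver reports -ENODEV */
}

int main(void)
{
	fake_reg |= 1ULL << 0;			/* "set" the reset bit */
	if (wait_bits_clear(&fake_reg, 1ULL << 0, 10) == 0)
		printf("bit cleared\n");
	return 0;
}
#endif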
static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}
static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}
static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i, err;

	for (i = 0; i <= LDN_MAX; i++) {
		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}
static int niu_enable_interrupts(struct niu *np, int on)
{
	int i, err;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}
static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
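/*
 * Illustrative sketch, not part of the original driver: phy_encode() and
 * phy_decode() above pack one small per-port PHY-type field into a single
 * u32, two bits per port (port 0 in bits 1:0, port 1 in bits 3:2, ...).
 * The standalone check below assumes a 0x3 mask purely for illustration;
 * demo_* names are made up.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_PORT_TYPE_MASK	0x3	/* assumed width of one port's field */

static uint32_t demo_phy_encode(uint32_t type, int port)
{
	return type << (port * 2);
}

static uint32_t demo_phy_decode(uint32_t val, int port)
{
	return (val >> (port * 2)) & DEMO_PORT_TYPE_MASK;
}

int main(void)
{
	uint32_t map = demo_phy_encode(2, 0) | demo_phy_encode(1, 3);

	assert(demo_phy_decode(map, 0) == 2);	/* round-trips per port */
	assert(demo_phy_decode(map, 3) == 1);
	assert(demo_phy_decode(map, 1) == 0);	/* untouched ports read 0 */
	return 0;
}
#endif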
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}
static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
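/*
 * Illustrative sketch, not part of the original driver: mdio_read() and
 * mdio_write() above perform clause-45-style accesses -- an ADDRESS frame
 * selecting (port, device, register) followed by a READ or WRITE frame,
 * each completed by polling the turnaround bit via mdio_wait() -- while
 * mii_read()/mii_write() issue single clause-22 frames.  The snippet below
 * shows only the call ordering; issue_frame() is a hypothetical stand-in
 * for writing MIF_FRAME_OUTPUT and waiting for completion.
 */
#if 0
#include <stdio.h>

static int issue_frame(const char *what, int port, int dev, int regnum)
{
	printf("frame: %s port=%d dev=%d reg=0x%x\n", what, port, dev, regnum);
	return 0;	/* a real mdio_wait() returns data or -ENODEV */
}

/* Two-step register read: latch the register number, then fetch data. */
static int demo_c45_read(int port, int dev, int regnum)
{
	int err = issue_frame("ADDR", port, dev, regnum);

	if (err < 0)
		return err;
	return issue_frame("READ", port, dev, regnum);
}

int main(void)
{
	demo_c45_read(0, 1, 0x0001);
	return 0;
}
#endif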
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}
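/*
 * Illustrative sketch, not part of the original driver: the esr2_set_*_cfg()
 * helpers split a 32-bit lane configuration into two 16-bit MDIO writes,
 * low half to the _CFG_L(channel) register and high half to _CFG_H(channel).
 * The standalone check below only demonstrates the arithmetic of the split.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void split_cfg(uint32_t val, uint16_t *lo, uint16_t *hi)
{
	*lo = (uint16_t)(val & 0xffff);	/* goes to the _L register */
	*hi = (uint16_t)(val >> 16);	/* goes to the _H register */
}

int main(void)
{
	uint16_t lo, hi;

	split_cfg(0xdeadbeef, &lo, &hi);
	assert(lo == 0xbeef && hi == 0xdead);
	assert(((uint32_t)hi << 16 | lo) == 0xdeadbeef);	/* lossless */
	return 0;
}
#endif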
/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}
static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}
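/*
 * Illustrative sketch, not part of the original driver: after lane init the
 * SERDES bring-up code samples ESR_INT_SIGNALS and compares the per-port
 * ready/detect bits against an expected pattern, retrying up to max_retry
 * times before declaring failure.  The standalone model below shows only
 * that retry shape; sample_status() and the bit values are made up.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t sample_status(int attempt)
{
	return attempt >= 2 ? 0x3 : 0x0;	/* "ready" on the third sample */
}

static int wait_for_signal(uint64_t mask, uint64_t val, int max_retry)
{
	uint64_t sig = 0;
	int attempt;

	for (attempt = 0; attempt < max_retry; attempt++) {
		sig = sample_status(attempt);
		if ((sig & mask) == val)
			return 0;
		/* the driver delays roughly 500ms between samples here */
	}
	fprintf(stderr, "signal bits [%08x] are not [%08x]\n",
		(int)(sig & mask), (int)val);
	return -1;
}

int main(void)
{
	return wait_for_signal(0x3, 0x3, 100);
}
#endif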
static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}
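/*
 * Illustrative sketch, not part of the original driver: the esr_read_*()
 * helpers reassemble a 32-bit register from two 16-bit MDIO reads.  Note
 * that mdio_read() returns either the 16-bit data or a negative errno,
 * which is why each half is checked for err >= 0 before being merged.
 * The standalone check below shows only the merge arithmetic.
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* Merge a low and a high 16-bit read result, skipping negative errors. */
static uint32_t merge_halves(int lo_read, int hi_read)
{
	uint32_t val = 0;

	if (lo_read >= 0) {
		val = (uint32_t)(lo_read & 0xffff);
		if (hi_read >= 0)
			val |= (uint32_t)(hi_read & 0xffff) << 16;
	}
	return val;
}

int main(void)
{
	assert(merge_halves(0xbeef, 0xdead) == 0xdeadbeef);
	return 0;
}
#endif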
static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}
static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}
751 static int serdes_init_10g(struct niu
*np
)
753 struct niu_link_config
*lp
= &np
->link_config
;
754 unsigned long ctrl_reg
, test_cfg_reg
, i
;
755 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
760 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
761 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
764 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
765 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
771 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
772 ENET_SERDES_CTRL_SDET_1
|
773 ENET_SERDES_CTRL_SDET_2
|
774 ENET_SERDES_CTRL_SDET_3
|
775 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
776 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
777 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
778 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
779 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
780 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
781 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
782 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
785 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
786 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
787 ENET_SERDES_TEST_MD_0_SHIFT
) |
788 (ENET_TEST_MD_PAD_LOOPBACK
<<
789 ENET_SERDES_TEST_MD_1_SHIFT
) |
790 (ENET_TEST_MD_PAD_LOOPBACK
<<
791 ENET_SERDES_TEST_MD_2_SHIFT
) |
792 (ENET_TEST_MD_PAD_LOOPBACK
<<
793 ENET_SERDES_TEST_MD_3_SHIFT
));
796 nw64(ctrl_reg
, ctrl_val
);
797 nw64(test_cfg_reg
, test_cfg_val
);
799 /* Initialize all 4 lanes of the SERDES. */
800 for (i
= 0; i
< 4; i
++) {
801 u32 rxtx_ctrl
, glue0
;
803 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
806 err
= esr_read_glue0(np
, i
, &glue0
);
810 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
811 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
812 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
814 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
815 ESR_GLUE_CTRL0_THCNT
|
816 ESR_GLUE_CTRL0_BLTIME
);
817 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
818 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
819 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
820 (BLTIME_300_CYCLES
<<
821 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
823 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
826 err
= esr_write_glue0(np
, i
, glue0
);
835 sig
= nr64(ESR_INT_SIGNALS
);
838 mask
= ESR_INT_SIGNALS_P0_BITS
;
839 val
= (ESR_INT_SRDY0_P0
|
849 mask
= ESR_INT_SIGNALS_P1_BITS
;
850 val
= (ESR_INT_SRDY0_P1
|
863 if ((sig
& mask
) != val
) {
864 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
865 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
868 netdev_err(np
->dev
, "Port %u signal bits [%08x] are not [%08x]\n",
869 np
->port
, (int)(sig
& mask
), (int)val
);
872 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
)
873 np
->flags
|= NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
877 static int serdes_init_1g(struct niu
*np
)
881 val
= nr64(ENET_SERDES_1_PLL_CFG
);
882 val
&= ~ENET_SERDES_PLL_FBDIV2
;
885 val
|= ENET_SERDES_PLL_HRATE0
;
888 val
|= ENET_SERDES_PLL_HRATE1
;
891 val
|= ENET_SERDES_PLL_HRATE2
;
894 val
|= ENET_SERDES_PLL_HRATE3
;
899 nw64(ENET_SERDES_1_PLL_CFG
, val
);
904 static int serdes_init_1g_serdes(struct niu
*np
)
906 struct niu_link_config
*lp
= &np
->link_config
;
907 unsigned long ctrl_reg
, test_cfg_reg
, pll_cfg
, i
;
908 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
910 u64 reset_val
, val_rd
;
912 val
= ENET_SERDES_PLL_HRATE0
| ENET_SERDES_PLL_HRATE1
|
913 ENET_SERDES_PLL_HRATE2
| ENET_SERDES_PLL_HRATE3
|
914 ENET_SERDES_PLL_FBDIV0
;
917 reset_val
= ENET_SERDES_RESET_0
;
918 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
919 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
920 pll_cfg
= ENET_SERDES_0_PLL_CFG
;
923 reset_val
= ENET_SERDES_RESET_1
;
924 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
925 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
926 pll_cfg
= ENET_SERDES_1_PLL_CFG
;
932 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
933 ENET_SERDES_CTRL_SDET_1
|
934 ENET_SERDES_CTRL_SDET_2
|
935 ENET_SERDES_CTRL_SDET_3
|
936 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
937 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
938 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
939 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
940 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
941 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
942 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
943 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
946 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
947 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
948 ENET_SERDES_TEST_MD_0_SHIFT
) |
949 (ENET_TEST_MD_PAD_LOOPBACK
<<
950 ENET_SERDES_TEST_MD_1_SHIFT
) |
951 (ENET_TEST_MD_PAD_LOOPBACK
<<
952 ENET_SERDES_TEST_MD_2_SHIFT
) |
953 (ENET_TEST_MD_PAD_LOOPBACK
<<
954 ENET_SERDES_TEST_MD_3_SHIFT
));
957 nw64(ENET_SERDES_RESET
, reset_val
);
959 val_rd
= nr64(ENET_SERDES_RESET
);
960 val_rd
&= ~reset_val
;
962 nw64(ctrl_reg
, ctrl_val
);
963 nw64(test_cfg_reg
, test_cfg_val
);
964 nw64(ENET_SERDES_RESET
, val_rd
);
967 /* Initialize all 4 lanes of the SERDES. */
968 for (i
= 0; i
< 4; i
++) {
969 u32 rxtx_ctrl
, glue0
;
971 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
974 err
= esr_read_glue0(np
, i
, &glue0
);
978 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
979 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
980 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
982 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
983 ESR_GLUE_CTRL0_THCNT
|
984 ESR_GLUE_CTRL0_BLTIME
);
985 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
986 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
987 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
988 (BLTIME_300_CYCLES
<<
989 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
991 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
994 err
= esr_write_glue0(np
, i
, glue0
);
1000 sig
= nr64(ESR_INT_SIGNALS
);
1003 val
= (ESR_INT_SRDY0_P0
| ESR_INT_DET0_P0
);
1008 val
= (ESR_INT_SRDY0_P1
| ESR_INT_DET0_P1
);
1016 if ((sig
& mask
) != val
) {
1017 netdev_err(np
->dev
, "Port %u signal bits [%08x] are not [%08x]\n",
1018 np
->port
, (int)(sig
& mask
), (int)val
);
static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed;
	u8 current_duplex;
	int link_up;
	unsigned long flags;
	u64 val;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}
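/*
 * Illustrative sketch, not part of the original driver:
 * link_status_1g_serdes() samples the PCS status and updates the cached
 * speed/duplex while holding np->lock with interrupts disabled, so the
 * timer and ethtool paths always see a consistent snapshot.  The
 * userspace-flavoured model below uses a pthread mutex as a stand-in for
 * spin_lock_irqsave(); all demo_* names are made up.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

struct demo_link {
	pthread_mutex_t lock;
	int active_speed;	/* e.g. 1000 */
	int active_duplex;	/* 1 == full */
};

static int demo_hw_link_status(void)
{
	return 1;	/* pretend the PCS reports link up */
}

static int demo_link_status(struct demo_link *lp, int *link_up_p)
{
	int link_up = 0, speed = -1, duplex = -1;

	pthread_mutex_lock(&lp->lock);
	if (demo_hw_link_status()) {
		link_up = 1;
		speed = 1000;
		duplex = 1;
	}
	lp->active_speed = speed;	/* publish under the lock */
	lp->active_duplex = duplex;
	pthread_mutex_unlock(&lp->lock);

	*link_up_p = link_up;
	return 0;
}

int main(void)
{
	struct demo_link lp = { PTHREAD_MUTEX_INITIALIZER, -1, -1 };
	int up;

	demo_link_status(&lp, &up);
	printf("link %s\n", up ? "up" : "down");
	return 0;
}
#endif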
1056 static int link_status_10g_serdes(struct niu
*np
, int *link_up_p
)
1058 unsigned long flags
;
1059 struct niu_link_config
*lp
= &np
->link_config
;
1066 if (!(np
->flags
& NIU_FLAGS_10G
))
1067 return link_status_1g_serdes(np
, link_up_p
);
1069 current_speed
= SPEED_INVALID
;
1070 current_duplex
= DUPLEX_INVALID
;
1071 spin_lock_irqsave(&np
->lock
, flags
);
1073 val
= nr64_xpcs(XPCS_STATUS(0));
1074 val2
= nr64_mac(XMAC_INTER2
);
1075 if (val2
& 0x01000000)
1078 if ((val
& 0x1000ULL
) && link_ok
) {
1080 current_speed
= SPEED_10000
;
1081 current_duplex
= DUPLEX_FULL
;
1083 lp
->active_speed
= current_speed
;
1084 lp
->active_duplex
= current_duplex
;
1085 spin_unlock_irqrestore(&np
->lock
, flags
);
1086 *link_up_p
= link_up
;
1090 static int link_status_mii(struct niu
*np
, int *link_up_p
)
1092 struct niu_link_config
*lp
= &np
->link_config
;
1094 int bmsr
, advert
, ctrl1000
, stat1000
, lpa
, bmcr
, estatus
;
1095 int supported
, advertising
, active_speed
, active_duplex
;
1097 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1098 if (unlikely(err
< 0))
1102 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1103 if (unlikely(err
< 0))
1107 err
= mii_read(np
, np
->phy_addr
, MII_ADVERTISE
);
1108 if (unlikely(err
< 0))
1112 err
= mii_read(np
, np
->phy_addr
, MII_LPA
);
1113 if (unlikely(err
< 0))
1117 if (likely(bmsr
& BMSR_ESTATEN
)) {
1118 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1119 if (unlikely(err
< 0))
1123 err
= mii_read(np
, np
->phy_addr
, MII_CTRL1000
);
1124 if (unlikely(err
< 0))
1128 err
= mii_read(np
, np
->phy_addr
, MII_STAT1000
);
1129 if (unlikely(err
< 0))
1133 estatus
= ctrl1000
= stat1000
= 0;
1136 if (bmsr
& BMSR_ANEGCAPABLE
)
1137 supported
|= SUPPORTED_Autoneg
;
1138 if (bmsr
& BMSR_10HALF
)
1139 supported
|= SUPPORTED_10baseT_Half
;
1140 if (bmsr
& BMSR_10FULL
)
1141 supported
|= SUPPORTED_10baseT_Full
;
1142 if (bmsr
& BMSR_100HALF
)
1143 supported
|= SUPPORTED_100baseT_Half
;
1144 if (bmsr
& BMSR_100FULL
)
1145 supported
|= SUPPORTED_100baseT_Full
;
1146 if (estatus
& ESTATUS_1000_THALF
)
1147 supported
|= SUPPORTED_1000baseT_Half
;
1148 if (estatus
& ESTATUS_1000_TFULL
)
1149 supported
|= SUPPORTED_1000baseT_Full
;
1150 lp
->supported
= supported
;
1153 if (advert
& ADVERTISE_10HALF
)
1154 advertising
|= ADVERTISED_10baseT_Half
;
1155 if (advert
& ADVERTISE_10FULL
)
1156 advertising
|= ADVERTISED_10baseT_Full
;
1157 if (advert
& ADVERTISE_100HALF
)
1158 advertising
|= ADVERTISED_100baseT_Half
;
1159 if (advert
& ADVERTISE_100FULL
)
1160 advertising
|= ADVERTISED_100baseT_Full
;
1161 if (ctrl1000
& ADVERTISE_1000HALF
)
1162 advertising
|= ADVERTISED_1000baseT_Half
;
1163 if (ctrl1000
& ADVERTISE_1000FULL
)
1164 advertising
|= ADVERTISED_1000baseT_Full
;
1166 if (bmcr
& BMCR_ANENABLE
) {
1169 lp
->active_autoneg
= 1;
1170 advertising
|= ADVERTISED_Autoneg
;
1173 neg1000
= (ctrl1000
<< 2) & stat1000
;
1175 if (neg1000
& (LPA_1000FULL
| LPA_1000HALF
))
1176 active_speed
= SPEED_1000
;
1177 else if (neg
& LPA_100
)
1178 active_speed
= SPEED_100
;
1179 else if (neg
& (LPA_10HALF
| LPA_10FULL
))
1180 active_speed
= SPEED_10
;
1182 active_speed
= SPEED_INVALID
;
1184 if ((neg1000
& LPA_1000FULL
) || (neg
& LPA_DUPLEX
))
1185 active_duplex
= DUPLEX_FULL
;
1186 else if (active_speed
!= SPEED_INVALID
)
1187 active_duplex
= DUPLEX_HALF
;
1189 active_duplex
= DUPLEX_INVALID
;
1191 lp
->active_autoneg
= 0;
1193 if ((bmcr
& BMCR_SPEED1000
) && !(bmcr
& BMCR_SPEED100
))
1194 active_speed
= SPEED_1000
;
1195 else if (bmcr
& BMCR_SPEED100
)
1196 active_speed
= SPEED_100
;
1198 active_speed
= SPEED_10
;
1200 if (bmcr
& BMCR_FULLDPLX
)
1201 active_duplex
= DUPLEX_FULL
;
1203 active_duplex
= DUPLEX_HALF
;
1206 lp
->active_advertising
= advertising
;
1207 lp
->active_speed
= active_speed
;
1208 lp
->active_duplex
= active_duplex
;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}
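/*
 * Illustrative sketch, not part of the original driver: link_status_mii()
 * resolves the negotiated 1000BASE-T result by aligning the local
 * advertisement (MII_CTRL1000, bits 9:8) with the partner's report
 * (MII_STAT1000, bits 11:10); shifting ctrl1000 left by two lines the
 * fields up, so the AND keeps only abilities both ends share.  The
 * standalone check below spells out the bit positions rather than pulling
 * them from mii.h.
 */
#if 0
#include <assert.h>

#define DEMO_ADV_1000HALF	0x0100	/* MII_CTRL1000 bit 8 */
#define DEMO_ADV_1000FULL	0x0200	/* MII_CTRL1000 bit 9 */
#define DEMO_LPA_1000HALF	0x0400	/* MII_STAT1000 bit 10 */
#define DEMO_LPA_1000FULL	0x0800	/* MII_STAT1000 bit 11 */

int main(void)
{
	int ctrl1000 = DEMO_ADV_1000FULL | DEMO_ADV_1000HALF;	/* we offer both */
	int stat1000 = DEMO_LPA_1000FULL;			/* partner: full only */
	int neg1000 = (ctrl1000 << 2) & stat1000;

	assert(neg1000 & DEMO_LPA_1000FULL);	/* common mode is 1000 full */
	assert(!(neg1000 & DEMO_LPA_1000HALF));
	return 0;
}
#endif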
1214 static int link_status_1g_rgmii(struct niu
*np
, int *link_up_p
)
1216 struct niu_link_config
*lp
= &np
->link_config
;
1217 u16 current_speed
, bmsr
;
1218 unsigned long flags
;
1223 current_speed
= SPEED_INVALID
;
1224 current_duplex
= DUPLEX_INVALID
;
1226 spin_lock_irqsave(&np
->lock
, flags
);
1230 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1235 if (bmsr
& BMSR_LSTATUS
) {
1236 u16 adv
, lpa
, common
, estat
;
1238 err
= mii_read(np
, np
->phy_addr
, MII_ADVERTISE
);
1243 err
= mii_read(np
, np
->phy_addr
, MII_LPA
);
1250 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1255 current_speed
= SPEED_1000
;
1256 current_duplex
= DUPLEX_FULL
;
1259 lp
->active_speed
= current_speed
;
1260 lp
->active_duplex
= current_duplex
;
1264 spin_unlock_irqrestore(&np
->lock
, flags
);
1266 *link_up_p
= link_up
;
1270 static int link_status_1g(struct niu
*np
, int *link_up_p
)
1272 struct niu_link_config
*lp
= &np
->link_config
;
1273 unsigned long flags
;
1276 spin_lock_irqsave(&np
->lock
, flags
);
1278 err
= link_status_mii(np
, link_up_p
);
1279 lp
->supported
|= SUPPORTED_TP
;
1280 lp
->active_advertising
|= ADVERTISED_TP
;
1282 spin_unlock_irqrestore(&np
->lock
, flags
);
1286 static int bcm8704_reset(struct niu
*np
)
1290 err
= mdio_read(np
, np
->phy_addr
,
1291 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
1292 if (err
< 0 || err
== 0xffff)
1295 err
= mdio_write(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
1301 while (--limit
>= 0) {
1302 err
= mdio_read(np
, np
->phy_addr
,
1303 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
1306 if (!(err
& BMCR_RESET
))
1310 netdev_err(np
->dev
, "Port %u PHY will not reset (bmcr=%04x)\n",
1311 np
->port
, (err
& 0xffff));
1317 /* When written, certain PHY registers need to be read back twice
1318 * in order for the bits to settle properly.
1320 static int bcm8704_user_dev3_readback(struct niu
*np
, int reg
)
1322 int err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, reg
);
1325 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, reg
);
1331 static int bcm8706_init_user_dev3(struct niu
*np
)
1336 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1337 BCM8704_USER_OPT_DIGITAL_CTRL
);
1340 err
&= ~USER_ODIG_CTRL_GPIOS
;
1341 err
|= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT
);
1342 err
|= USER_ODIG_CTRL_RESV2
;
1343 err
= mdio_write(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1344 BCM8704_USER_OPT_DIGITAL_CTRL
, err
);
1353 static int bcm8704_init_user_dev3(struct niu
*np
)
1357 err
= mdio_write(np
, np
->phy_addr
,
1358 BCM8704_USER_DEV3_ADDR
, BCM8704_USER_CONTROL
,
1359 (USER_CONTROL_OPTXRST_LVL
|
1360 USER_CONTROL_OPBIASFLT_LVL
|
1361 USER_CONTROL_OBTMPFLT_LVL
|
1362 USER_CONTROL_OPPRFLT_LVL
|
1363 USER_CONTROL_OPTXFLT_LVL
|
1364 USER_CONTROL_OPRXLOS_LVL
|
1365 USER_CONTROL_OPRXFLT_LVL
|
1366 USER_CONTROL_OPTXON_LVL
|
1367 (0x3f << USER_CONTROL_RES1_SHIFT
)));
1371 err
= mdio_write(np
, np
->phy_addr
,
1372 BCM8704_USER_DEV3_ADDR
, BCM8704_USER_PMD_TX_CONTROL
,
1373 (USER_PMD_TX_CTL_XFP_CLKEN
|
1374 (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH
) |
1375 (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH
) |
1376 USER_PMD_TX_CTL_TSCK_LPWREN
));
1380 err
= bcm8704_user_dev3_readback(np
, BCM8704_USER_CONTROL
);
1383 err
= bcm8704_user_dev3_readback(np
, BCM8704_USER_PMD_TX_CONTROL
);
1387 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1388 BCM8704_USER_OPT_DIGITAL_CTRL
);
1391 err
&= ~USER_ODIG_CTRL_GPIOS
;
1392 err
|= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT
);
1393 err
= mdio_write(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1394 BCM8704_USER_OPT_DIGITAL_CTRL
, err
);
1403 static int mrvl88x2011_act_led(struct niu
*np
, int val
)
1407 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1408 MRVL88X2011_LED_8_TO_11_CTL
);
1412 err
&= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT
,MRVL88X2011_LED_CTL_MASK
);
1413 err
|= MRVL88X2011_LED(MRVL88X2011_LED_ACT
,val
);
1415 return mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1416 MRVL88X2011_LED_8_TO_11_CTL
, err
);
1419 static int mrvl88x2011_led_blink_rate(struct niu
*np
, int rate
)
1423 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1424 MRVL88X2011_LED_BLINK_CTL
);
1426 err
&= ~MRVL88X2011_LED_BLKRATE_MASK
;
1429 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1430 MRVL88X2011_LED_BLINK_CTL
, err
);
1436 static int xcvr_init_10g_mrvl88x2011(struct niu
*np
)
1440 /* Set LED functions */
1441 err
= mrvl88x2011_led_blink_rate(np
, MRVL88X2011_LED_BLKRATE_134MS
);
1446 err
= mrvl88x2011_act_led(np
, MRVL88X2011_LED_CTL_OFF
);
1450 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1451 MRVL88X2011_GENERAL_CTL
);
1455 err
|= MRVL88X2011_ENA_XFPREFCLK
;
1457 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1458 MRVL88X2011_GENERAL_CTL
, err
);
1462 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1463 MRVL88X2011_PMA_PMD_CTL_1
);
1467 if (np
->link_config
.loopback_mode
== LOOPBACK_MAC
)
1468 err
|= MRVL88X2011_LOOPBACK
;
1470 err
&= ~MRVL88X2011_LOOPBACK
;
1472 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1473 MRVL88X2011_PMA_PMD_CTL_1
, err
);
1478 return mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1479 MRVL88X2011_10G_PMD_TX_DIS
, MRVL88X2011_ENA_PMDTX
);
1483 static int xcvr_diag_bcm870x(struct niu
*np
)
1485 u16 analog_stat0
, tx_alarm_status
;
1489 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
1493 pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np
->port
, err
);
1495 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, 0x20);
1498 pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np
->port
, err
);
1500 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
1504 pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np
->port
, err
);
1507 /* XXX dig this out it might not be so useful XXX */
1508 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1509 BCM8704_USER_ANALOG_STATUS0
);
1512 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1513 BCM8704_USER_ANALOG_STATUS0
);
1518 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1519 BCM8704_USER_TX_ALARM_STATUS
);
1522 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1523 BCM8704_USER_TX_ALARM_STATUS
);
1526 tx_alarm_status
= err
;
1528 if (analog_stat0
!= 0x03fc) {
1529 if ((analog_stat0
== 0x43bc) && (tx_alarm_status
!= 0)) {
1530 pr_info("Port %u cable not connected or bad cable\n",
1532 } else if (analog_stat0
== 0x639c) {
1533 pr_info("Port %u optical module is bad or missing\n",
1541 static int xcvr_10g_set_lb_bcm870x(struct niu
*np
)
1543 struct niu_link_config
*lp
= &np
->link_config
;
1546 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1551 err
&= ~BMCR_LOOPBACK
;
1553 if (lp
->loopback_mode
== LOOPBACK_MAC
)
1554 err
|= BMCR_LOOPBACK
;
1556 err
= mdio_write(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1564 static int xcvr_init_10g_bcm8706(struct niu
*np
)
1569 if ((np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) &&
1570 (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) == 0)
1573 val
= nr64_mac(XMAC_CONFIG
);
1574 val
&= ~XMAC_CONFIG_LED_POLARITY
;
1575 val
|= XMAC_CONFIG_FORCE_LED_ON
;
1576 nw64_mac(XMAC_CONFIG
, val
);
1578 val
= nr64(MIF_CONFIG
);
1579 val
|= MIF_CONFIG_INDIRECT_MODE
;
1580 nw64(MIF_CONFIG
, val
);
1582 err
= bcm8704_reset(np
);
1586 err
= xcvr_10g_set_lb_bcm870x(np
);
1590 err
= bcm8706_init_user_dev3(np
);
1594 err
= xcvr_diag_bcm870x(np
);
1601 static int xcvr_init_10g_bcm8704(struct niu
*np
)
1605 err
= bcm8704_reset(np
);
1609 err
= bcm8704_init_user_dev3(np
);
1613 err
= xcvr_10g_set_lb_bcm870x(np
);
1617 err
= xcvr_diag_bcm870x(np
);
1624 static int xcvr_init_10g(struct niu
*np
)
1629 val
= nr64_mac(XMAC_CONFIG
);
1630 val
&= ~XMAC_CONFIG_LED_POLARITY
;
1631 val
|= XMAC_CONFIG_FORCE_LED_ON
;
1632 nw64_mac(XMAC_CONFIG
, val
);
1634 /* XXX shared resource, lock parent XXX */
1635 val
= nr64(MIF_CONFIG
);
1636 val
|= MIF_CONFIG_INDIRECT_MODE
;
1637 nw64(MIF_CONFIG
, val
);
1639 phy_id
= phy_decode(np
->parent
->port_phy
, np
->port
);
1640 phy_id
= np
->parent
->phy_probe_info
.phy_id
[phy_id
][np
->port
];
1642 /* handle different phy types */
1643 switch (phy_id
& NIU_PHY_ID_MASK
) {
1644 case NIU_PHY_ID_MRVL88X2011
:
1645 err
= xcvr_init_10g_mrvl88x2011(np
);
1648 default: /* bcom 8704 */
1649 err
= xcvr_init_10g_bcm8704(np
);
1656 static int mii_reset(struct niu
*np
)
1660 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, BMCR_RESET
);
1665 while (--limit
>= 0) {
1667 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1670 if (!(err
& BMCR_RESET
))
1674 netdev_err(np
->dev
, "Port %u MII would not reset, bmcr[%04x]\n",
1682 static int xcvr_init_1g_rgmii(struct niu
*np
)
1686 u16 bmcr
, bmsr
, estat
;
1688 val
= nr64(MIF_CONFIG
);
1689 val
&= ~MIF_CONFIG_INDIRECT_MODE
;
1690 nw64(MIF_CONFIG
, val
);
1692 err
= mii_reset(np
);
1696 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1702 if (bmsr
& BMSR_ESTATEN
) {
1703 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1710 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1714 if (bmsr
& BMSR_ESTATEN
) {
1717 if (estat
& ESTATUS_1000_TFULL
)
1718 ctrl1000
|= ADVERTISE_1000FULL
;
1719 err
= mii_write(np
, np
->phy_addr
, MII_CTRL1000
, ctrl1000
);
1724 bmcr
= (BMCR_SPEED1000
| BMCR_FULLDPLX
);
1726 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1730 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1733 bmcr
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1735 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1742 static int mii_init_common(struct niu
*np
)
1744 struct niu_link_config
*lp
= &np
->link_config
;
1745 u16 bmcr
, bmsr
, adv
, estat
;
1748 err
= mii_reset(np
);
1752 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1758 if (bmsr
& BMSR_ESTATEN
) {
1759 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1766 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1770 if (lp
->loopback_mode
== LOOPBACK_MAC
) {
1771 bmcr
|= BMCR_LOOPBACK
;
1772 if (lp
->active_speed
== SPEED_1000
)
1773 bmcr
|= BMCR_SPEED1000
;
1774 if (lp
->active_duplex
== DUPLEX_FULL
)
1775 bmcr
|= BMCR_FULLDPLX
;
1778 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
1781 aux
= (BCM5464R_AUX_CTL_EXT_LB
|
1782 BCM5464R_AUX_CTL_WRITE_1
);
1783 err
= mii_write(np
, np
->phy_addr
, BCM5464R_AUX_CTL
, aux
);
1791 adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1792 if ((bmsr
& BMSR_10HALF
) &&
1793 (lp
->advertising
& ADVERTISED_10baseT_Half
))
1794 adv
|= ADVERTISE_10HALF
;
1795 if ((bmsr
& BMSR_10FULL
) &&
1796 (lp
->advertising
& ADVERTISED_10baseT_Full
))
1797 adv
|= ADVERTISE_10FULL
;
1798 if ((bmsr
& BMSR_100HALF
) &&
1799 (lp
->advertising
& ADVERTISED_100baseT_Half
))
1800 adv
|= ADVERTISE_100HALF
;
1801 if ((bmsr
& BMSR_100FULL
) &&
1802 (lp
->advertising
& ADVERTISED_100baseT_Full
))
1803 adv
|= ADVERTISE_100FULL
;
1804 err
= mii_write(np
, np
->phy_addr
, MII_ADVERTISE
, adv
);
1808 if (likely(bmsr
& BMSR_ESTATEN
)) {
1810 if ((estat
& ESTATUS_1000_THALF
) &&
1811 (lp
->advertising
& ADVERTISED_1000baseT_Half
))
1812 ctrl1000
|= ADVERTISE_1000HALF
;
1813 if ((estat
& ESTATUS_1000_TFULL
) &&
1814 (lp
->advertising
& ADVERTISED_1000baseT_Full
))
1815 ctrl1000
|= ADVERTISE_1000FULL
;
1816 err
= mii_write(np
, np
->phy_addr
,
1817 MII_CTRL1000
, ctrl1000
);
1822 bmcr
|= (BMCR_ANENABLE
| BMCR_ANRESTART
);
1827 if (lp
->duplex
== DUPLEX_FULL
) {
1828 bmcr
|= BMCR_FULLDPLX
;
1830 } else if (lp
->duplex
== DUPLEX_HALF
)
1835 if (lp
->speed
== SPEED_1000
) {
1836 /* if X-full requested while not supported, or
1837 X-half requested while not supported... */
1838 if ((fulldpx
&& !(estat
& ESTATUS_1000_TFULL
)) ||
1839 (!fulldpx
&& !(estat
& ESTATUS_1000_THALF
)))
1841 bmcr
|= BMCR_SPEED1000
;
1842 } else if (lp
->speed
== SPEED_100
) {
1843 if ((fulldpx
&& !(bmsr
& BMSR_100FULL
)) ||
1844 (!fulldpx
&& !(bmsr
& BMSR_100HALF
)))
1846 bmcr
|= BMCR_SPEED100
;
1847 } else if (lp
->speed
== SPEED_10
) {
1848 if ((fulldpx
&& !(bmsr
& BMSR_10FULL
)) ||
1849 (!fulldpx
&& !(bmsr
& BMSR_10HALF
)))
1855 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1860 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1865 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1870 pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1871 np
->port
, bmcr
, bmsr
);
1877 static int xcvr_init_1g(struct niu
*np
)
1881 /* XXX shared resource, lock parent XXX */
1882 val
= nr64(MIF_CONFIG
);
1883 val
&= ~MIF_CONFIG_INDIRECT_MODE
;
1884 nw64(MIF_CONFIG
, val
);
1886 return mii_init_common(np
);
1889 static int niu_xcvr_init(struct niu
*np
)
1891 const struct niu_phy_ops
*ops
= np
->phy_ops
;
1896 err
= ops
->xcvr_init(np
);
1901 static int niu_serdes_init(struct niu
*np
)
1903 const struct niu_phy_ops
*ops
= np
->phy_ops
;
1907 if (ops
->serdes_init
)
1908 err
= ops
->serdes_init(np
);
1913 static void niu_init_xif(struct niu
*);
1914 static void niu_handle_led(struct niu
*, int status
);
1916 static int niu_link_status_common(struct niu
*np
, int link_up
)
1918 struct niu_link_config
*lp
= &np
->link_config
;
1919 struct net_device
*dev
= np
->dev
;
1920 unsigned long flags
;
1922 if (!netif_carrier_ok(dev
) && link_up
) {
1923 netif_info(np
, link
, dev
, "Link is up at %s, %s duplex\n",
1924 lp
->active_speed
== SPEED_10000
? "10Gb/sec" :
1925 lp
->active_speed
== SPEED_1000
? "1Gb/sec" :
1926 lp
->active_speed
== SPEED_100
? "100Mbit/sec" :
1928 lp
->active_duplex
== DUPLEX_FULL
? "full" : "half");
1930 spin_lock_irqsave(&np
->lock
, flags
);
1932 niu_handle_led(np
, 1);
1933 spin_unlock_irqrestore(&np
->lock
, flags
);
1935 netif_carrier_on(dev
);
1936 } else if (netif_carrier_ok(dev
) && !link_up
) {
1937 netif_warn(np
, link
, dev
, "Link is down\n");
1938 spin_lock_irqsave(&np
->lock
, flags
);
1939 niu_handle_led(np
, 0);
1940 spin_unlock_irqrestore(&np
->lock
, flags
);
1941 netif_carrier_off(dev
);
1947 static int link_status_10g_mrvl(struct niu
*np
, int *link_up_p
)
1949 int err
, link_up
, pma_status
, pcs_status
;
1953 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1954 MRVL88X2011_10G_PMD_STATUS_2
);
1958 /* Check PMA/PMD Register: 1.0001.2 == 1 */
1959 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1960 MRVL88X2011_PMA_PMD_STATUS_1
);
1964 pma_status
= ((err
& MRVL88X2011_LNK_STATUS_OK
) ? 1 : 0);
1966 /* Check PMC Register : 3.0001.2 == 1: read twice */
1967 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1968 MRVL88X2011_PMA_PMD_STATUS_1
);
1972 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1973 MRVL88X2011_PMA_PMD_STATUS_1
);
1977 pcs_status
= ((err
& MRVL88X2011_LNK_STATUS_OK
) ? 1 : 0);
1979 /* Check XGXS Register : 4.0018.[0-3,12] */
1980 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV4_ADDR
,
1981 MRVL88X2011_10G_XGXS_LANE_STAT
);
1985 if (err
== (PHYXS_XGXS_LANE_STAT_ALINGED
| PHYXS_XGXS_LANE_STAT_LANE3
|
1986 PHYXS_XGXS_LANE_STAT_LANE2
| PHYXS_XGXS_LANE_STAT_LANE1
|
1987 PHYXS_XGXS_LANE_STAT_LANE0
| PHYXS_XGXS_LANE_STAT_MAGIC
|
1989 link_up
= (pma_status
&& pcs_status
) ? 1 : 0;
1991 np
->link_config
.active_speed
= SPEED_10000
;
1992 np
->link_config
.active_duplex
= DUPLEX_FULL
;
1995 mrvl88x2011_act_led(np
, (link_up
?
1996 MRVL88X2011_LED_CTL_PCS_ACT
:
1997 MRVL88X2011_LED_CTL_OFF
));
1999 *link_up_p
= link_up
;
2003 static int link_status_10g_bcm8706(struct niu
*np
, int *link_up_p
)
2008 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
2009 BCM8704_PMD_RCV_SIGDET
);
2010 if (err
< 0 || err
== 0xffff)
2012 if (!(err
& PMD_RCV_SIGDET_GLOBAL
)) {
2017 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
2018 BCM8704_PCS_10G_R_STATUS
);
2022 if (!(err
& PCS_10G_R_STATUS_BLK_LOCK
)) {
2027 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
2028 BCM8704_PHYXS_XGXS_LANE_STAT
);
2031 if (err
!= (PHYXS_XGXS_LANE_STAT_ALINGED
|
2032 PHYXS_XGXS_LANE_STAT_MAGIC
|
2033 PHYXS_XGXS_LANE_STAT_PATTEST
|
2034 PHYXS_XGXS_LANE_STAT_LANE3
|
2035 PHYXS_XGXS_LANE_STAT_LANE2
|
2036 PHYXS_XGXS_LANE_STAT_LANE1
|
2037 PHYXS_XGXS_LANE_STAT_LANE0
)) {
2039 np
->link_config
.active_speed
= SPEED_INVALID
;
2040 np
->link_config
.active_duplex
= DUPLEX_INVALID
;
2045 np
->link_config
.active_speed
= SPEED_10000
;
2046 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2050 *link_up_p
= link_up
;
2054 static int link_status_10g_bcom(struct niu
*np
, int *link_up_p
)
2060 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
2061 BCM8704_PMD_RCV_SIGDET
);
2064 if (!(err
& PMD_RCV_SIGDET_GLOBAL
)) {
2069 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
2070 BCM8704_PCS_10G_R_STATUS
);
2073 if (!(err
& PCS_10G_R_STATUS_BLK_LOCK
)) {
2078 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
2079 BCM8704_PHYXS_XGXS_LANE_STAT
);
2083 if (err
!= (PHYXS_XGXS_LANE_STAT_ALINGED
|
2084 PHYXS_XGXS_LANE_STAT_MAGIC
|
2085 PHYXS_XGXS_LANE_STAT_LANE3
|
2086 PHYXS_XGXS_LANE_STAT_LANE2
|
2087 PHYXS_XGXS_LANE_STAT_LANE1
|
2088 PHYXS_XGXS_LANE_STAT_LANE0
)) {
2094 np
->link_config
.active_speed
= SPEED_10000
;
2095 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2099 *link_up_p
= link_up
;
2103 static int link_status_10g(struct niu
*np
, int *link_up_p
)
2105 unsigned long flags
;
2108 spin_lock_irqsave(&np
->lock
, flags
);
2110 if (np
->link_config
.loopback_mode
== LOOPBACK_DISABLED
) {
2113 phy_id
= phy_decode(np
->parent
->port_phy
, np
->port
);
2114 phy_id
= np
->parent
->phy_probe_info
.phy_id
[phy_id
][np
->port
];
2116 /* handle different phy types */
2117 switch (phy_id
& NIU_PHY_ID_MASK
) {
2118 case NIU_PHY_ID_MRVL88X2011
:
2119 err
= link_status_10g_mrvl(np
, link_up_p
);
2122 default: /* bcom 8704 */
2123 err
= link_status_10g_bcom(np
, link_up_p
);
2128 spin_unlock_irqrestore(&np
->lock
, flags
);
2133 static int niu_10g_phy_present(struct niu
*np
)
2137 sig
= nr64(ESR_INT_SIGNALS
);
2140 mask
= ESR_INT_SIGNALS_P0_BITS
;
2141 val
= (ESR_INT_SRDY0_P0
|
2144 ESR_INT_XDP_P0_CH3
|
2145 ESR_INT_XDP_P0_CH2
|
2146 ESR_INT_XDP_P0_CH1
|
2147 ESR_INT_XDP_P0_CH0
);
2151 mask
= ESR_INT_SIGNALS_P1_BITS
;
2152 val
= (ESR_INT_SRDY0_P1
|
2155 ESR_INT_XDP_P1_CH3
|
2156 ESR_INT_XDP_P1_CH2
|
2157 ESR_INT_XDP_P1_CH1
|
2158 ESR_INT_XDP_P1_CH0
);
2165 if ((sig
& mask
) != val
)
2170 static int link_status_10g_hotplug(struct niu
*np
, int *link_up_p
)
2172 unsigned long flags
;
2175 int phy_present_prev
;
2177 spin_lock_irqsave(&np
->lock
, flags
);
2179 if (np
->link_config
.loopback_mode
== LOOPBACK_DISABLED
) {
2180 phy_present_prev
= (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) ?
2182 phy_present
= niu_10g_phy_present(np
);
2183 if (phy_present
!= phy_present_prev
) {
2186 /* A NEM was just plugged in */
2187 np
->flags
|= NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2188 if (np
->phy_ops
->xcvr_init
)
2189 err
= np
->phy_ops
->xcvr_init(np
);
2191 err
= mdio_read(np
, np
->phy_addr
,
2192 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
2193 if (err
== 0xffff) {
2194 /* No mdio, back-to-back XAUI */
2198 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2201 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
2203 netif_warn(np
, link
, np
->dev
,
2204 "Hotplug PHY Removed\n");
2208 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) {
2209 err
= link_status_10g_bcm8706(np
, link_up_p
);
2210 if (err
== 0xffff) {
2211 /* No mdio, back-to-back XAUI: it is C10NEM */
2213 np
->link_config
.active_speed
= SPEED_10000
;
2214 np
->link_config
.active_duplex
= DUPLEX_FULL
;
2219 spin_unlock_irqrestore(&np
->lock
, flags
);
2224 static int niu_link_status(struct niu
*np
, int *link_up_p
)
2226 const struct niu_phy_ops
*ops
= np
->phy_ops
;
2230 if (ops
->link_status
)
2231 err
= ops
->link_status(np
, link_up_p
);
2236 static void niu_timer(unsigned long __opaque
)
2238 struct niu
*np
= (struct niu
*) __opaque
;
2242 err
= niu_link_status(np
, &link_up
);
2244 niu_link_status_common(np
, link_up
);
2246 if (netif_carrier_ok(np
->dev
))
2250 np
->timer
.expires
= jiffies
+ off
;
2252 add_timer(&np
->timer
);
2255 static const struct niu_phy_ops phy_ops_10g_serdes
= {
2256 .serdes_init
= serdes_init_10g_serdes
,
2257 .link_status
= link_status_10g_serdes
,
2260 static const struct niu_phy_ops phy_ops_10g_serdes_niu
= {
2261 .serdes_init
= serdes_init_niu_10g_serdes
,
2262 .link_status
= link_status_10g_serdes
,
2265 static const struct niu_phy_ops phy_ops_1g_serdes_niu
= {
2266 .serdes_init
= serdes_init_niu_1g_serdes
,
2267 .link_status
= link_status_1g_serdes
,
2270 static const struct niu_phy_ops phy_ops_1g_rgmii
= {
2271 .xcvr_init
= xcvr_init_1g_rgmii
,
2272 .link_status
= link_status_1g_rgmii
,
2275 static const struct niu_phy_ops phy_ops_10g_fiber_niu
= {
2276 .serdes_init
= serdes_init_niu_10g_fiber
,
2277 .xcvr_init
= xcvr_init_10g
,
2278 .link_status
= link_status_10g
,
2281 static const struct niu_phy_ops phy_ops_10g_fiber
= {
2282 .serdes_init
= serdes_init_10g
,
2283 .xcvr_init
= xcvr_init_10g
,
2284 .link_status
= link_status_10g
,
2287 static const struct niu_phy_ops phy_ops_10g_fiber_hotplug
= {
2288 .serdes_init
= serdes_init_10g
,
2289 .xcvr_init
= xcvr_init_10g_bcm8706
,
2290 .link_status
= link_status_10g_hotplug
,
2293 static const struct niu_phy_ops phy_ops_niu_10g_hotplug
= {
2294 .serdes_init
= serdes_init_niu_10g_fiber
,
2295 .xcvr_init
= xcvr_init_10g_bcm8706
,
2296 .link_status
= link_status_10g_hotplug
,
2299 static const struct niu_phy_ops phy_ops_10g_copper
= {
2300 .serdes_init
= serdes_init_10g
,
2301 .link_status
= link_status_10g
, /* XXX */
2304 static const struct niu_phy_ops phy_ops_1g_fiber
= {
2305 .serdes_init
= serdes_init_1g
,
2306 .xcvr_init
= xcvr_init_1g
,
2307 .link_status
= link_status_1g
,
2310 static const struct niu_phy_ops phy_ops_1g_copper
= {
2311 .xcvr_init
= xcvr_init_1g
,
2312 .link_status
= link_status_1g
,
2315 struct niu_phy_template
{
2316 const struct niu_phy_ops
*ops
;
2320 static const struct niu_phy_template phy_template_niu_10g_fiber
= {
2321 .ops
= &phy_ops_10g_fiber_niu
,
2322 .phy_addr_base
= 16,
2325 static const struct niu_phy_template phy_template_niu_10g_serdes
= {
2326 .ops
= &phy_ops_10g_serdes_niu
,
2330 static const struct niu_phy_template phy_template_niu_1g_serdes
= {
2331 .ops
= &phy_ops_1g_serdes_niu
,
2335 static const struct niu_phy_template phy_template_10g_fiber
= {
2336 .ops
= &phy_ops_10g_fiber
,
2340 static const struct niu_phy_template phy_template_10g_fiber_hotplug
= {
2341 .ops
= &phy_ops_10g_fiber_hotplug
,
2345 static const struct niu_phy_template phy_template_niu_10g_hotplug
= {
2346 .ops
= &phy_ops_niu_10g_hotplug
,
2350 static const struct niu_phy_template phy_template_10g_copper
= {
2351 .ops
= &phy_ops_10g_copper
,
2352 .phy_addr_base
= 10,
2355 static const struct niu_phy_template phy_template_1g_fiber
= {
2356 .ops
= &phy_ops_1g_fiber
,
2360 static const struct niu_phy_template phy_template_1g_copper
= {
2361 .ops
= &phy_ops_1g_copper
,
2365 static const struct niu_phy_template phy_template_1g_rgmii
= {
2366 .ops
= &phy_ops_1g_rgmii
,
2370 static const struct niu_phy_template phy_template_10g_serdes
= {
2371 .ops
= &phy_ops_10g_serdes
,
2375 static int niu_atca_port_num
[4] = {
2379 static int serdes_init_10g_serdes(struct niu
*np
)
2381 struct niu_link_config
*lp
= &np
->link_config
;
2382 unsigned long ctrl_reg
, test_cfg_reg
, pll_cfg
, i
;
2383 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
2388 reset_val
= ENET_SERDES_RESET_0
;
2389 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
2390 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
2391 pll_cfg
= ENET_SERDES_0_PLL_CFG
;
2394 reset_val
= ENET_SERDES_RESET_1
;
2395 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
2396 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
2397 pll_cfg
= ENET_SERDES_1_PLL_CFG
;
2403 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
2404 ENET_SERDES_CTRL_SDET_1
|
2405 ENET_SERDES_CTRL_SDET_2
|
2406 ENET_SERDES_CTRL_SDET_3
|
2407 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
2408 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
2409 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
2410 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
2411 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
2412 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
2413 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
2414 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
2417 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
2418 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
2419 ENET_SERDES_TEST_MD_0_SHIFT
) |
2420 (ENET_TEST_MD_PAD_LOOPBACK
<<
2421 ENET_SERDES_TEST_MD_1_SHIFT
) |
2422 (ENET_TEST_MD_PAD_LOOPBACK
<<
2423 ENET_SERDES_TEST_MD_2_SHIFT
) |
2424 (ENET_TEST_MD_PAD_LOOPBACK
<<
2425 ENET_SERDES_TEST_MD_3_SHIFT
));
2429 nw64(pll_cfg
, ENET_SERDES_PLL_FBDIV2
);
2430 nw64(ctrl_reg
, ctrl_val
);
2431 nw64(test_cfg_reg
, test_cfg_val
);
2433 /* Initialize all 4 lanes of the SERDES. */
2434 for (i
= 0; i
< 4; i
++) {
2435 u32 rxtx_ctrl
, glue0
;
2438 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
2441 err
= esr_read_glue0(np
, i
, &glue0
);
2445 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
2446 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
2447 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
2449 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
2450 ESR_GLUE_CTRL0_THCNT
|
2451 ESR_GLUE_CTRL0_BLTIME
);
2452 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
2453 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
2454 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
2455 (BLTIME_300_CYCLES
<<
2456 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
2458 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
2461 err
= esr_write_glue0(np
, i
, glue0
);
2467 sig
= nr64(ESR_INT_SIGNALS
);
2470 mask
= ESR_INT_SIGNALS_P0_BITS
;
2471 val
= (ESR_INT_SRDY0_P0
|
2474 ESR_INT_XDP_P0_CH3
|
2475 ESR_INT_XDP_P0_CH2
|
2476 ESR_INT_XDP_P0_CH1
|
2477 ESR_INT_XDP_P0_CH0
);
2481 mask
= ESR_INT_SIGNALS_P1_BITS
;
2482 val
= (ESR_INT_SRDY0_P1
|
2485 ESR_INT_XDP_P1_CH3
|
2486 ESR_INT_XDP_P1_CH2
|
2487 ESR_INT_XDP_P1_CH1
|
2488 ESR_INT_XDP_P1_CH0
);
2495 if ((sig
& mask
) != val
) {
2497 err
= serdes_init_1g_serdes(np
);
2499 np
->flags
&= ~NIU_FLAGS_10G
;
2500 np
->mac_xcvr
= MAC_XCVR_PCS
;
2502 netdev_err(np
->dev
, "Port %u 10G/1G SERDES Link Failed\n",
2511 static int niu_determine_phy_disposition(struct niu
*np
)
2513 struct niu_parent
*parent
= np
->parent
;
2514 u8 plat_type
= parent
->plat_type
;
2515 const struct niu_phy_template
*tp
;
2516 u32 phy_addr_off
= 0;
2518 if (plat_type
== PLAT_TYPE_NIU
) {
2522 NIU_FLAGS_XCVR_SERDES
)) {
2523 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
2525 tp
= &phy_template_niu_10g_serdes
;
2527 case NIU_FLAGS_XCVR_SERDES
:
2529 tp
= &phy_template_niu_1g_serdes
;
2531 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
2534 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
2535 tp
= &phy_template_niu_10g_hotplug
;
2541 tp
= &phy_template_niu_10g_fiber
;
2542 phy_addr_off
+= np
->port
;
2550 NIU_FLAGS_XCVR_SERDES
)) {
2553 tp
= &phy_template_1g_copper
;
2554 if (plat_type
== PLAT_TYPE_VF_P0
)
2556 else if (plat_type
== PLAT_TYPE_VF_P1
)
2559 phy_addr_off
+= (np
->port
^ 0x3);
2564 tp
= &phy_template_10g_copper
;
2567 case NIU_FLAGS_FIBER
:
2569 tp
= &phy_template_1g_fiber
;
2572 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
2574 tp
= &phy_template_10g_fiber
;
2575 if (plat_type
== PLAT_TYPE_VF_P0
||
2576 plat_type
== PLAT_TYPE_VF_P1
)
2578 phy_addr_off
+= np
->port
;
2579 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
2580 tp
= &phy_template_10g_fiber_hotplug
;
2588 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
2589 case NIU_FLAGS_XCVR_SERDES
| NIU_FLAGS_FIBER
:
2590 case NIU_FLAGS_XCVR_SERDES
:
2594 tp
= &phy_template_10g_serdes
;
2598 tp
= &phy_template_1g_rgmii
;
2604 phy_addr_off
= niu_atca_port_num
[np
->port
];
2612 np
->phy_ops
= tp
->ops
;
2613 np
->phy_addr
= tp
->phy_addr_base
+ phy_addr_off
;
2618 static int niu_init_link(struct niu
*np
)
2620 struct niu_parent
*parent
= np
->parent
;
2623 if (parent
->plat_type
== PLAT_TYPE_NIU
) {
2624 err
= niu_xcvr_init(np
);
2629 err
= niu_serdes_init(np
);
2630 if (err
&& !(np
->flags
& NIU_FLAGS_HOTPLUG_PHY
))
2633 err
= niu_xcvr_init(np
);
2634 if (!err
|| (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
))
2635 niu_link_status(np
, &ignore
);
2639 static void niu_set_primary_mac(struct niu
*np
, unsigned char *addr
)
2641 u16 reg0
= addr
[4] << 8 | addr
[5];
2642 u16 reg1
= addr
[2] << 8 | addr
[3];
2643 u16 reg2
= addr
[0] << 8 | addr
[1];
2645 if (np
->flags
& NIU_FLAGS_XMAC
) {
2646 nw64_mac(XMAC_ADDR0
, reg0
);
2647 nw64_mac(XMAC_ADDR1
, reg1
);
2648 nw64_mac(XMAC_ADDR2
, reg2
);
2650 nw64_mac(BMAC_ADDR0
, reg0
);
2651 nw64_mac(BMAC_ADDR1
, reg1
);
2652 nw64_mac(BMAC_ADDR2
, reg2
);
2656 static int niu_num_alt_addr(struct niu
*np
)
2658 if (np
->flags
& NIU_FLAGS_XMAC
)
2659 return XMAC_NUM_ALT_ADDR
;
2661 return BMAC_NUM_ALT_ADDR
;
2664 static int niu_set_alt_mac(struct niu
*np
, int index
, unsigned char *addr
)
2666 u16 reg0
= addr
[4] << 8 | addr
[5];
2667 u16 reg1
= addr
[2] << 8 | addr
[3];
2668 u16 reg2
= addr
[0] << 8 | addr
[1];
2670 if (index
>= niu_num_alt_addr(np
))
2673 if (np
->flags
& NIU_FLAGS_XMAC
) {
2674 nw64_mac(XMAC_ALT_ADDR0(index
), reg0
);
2675 nw64_mac(XMAC_ALT_ADDR1(index
), reg1
);
2676 nw64_mac(XMAC_ALT_ADDR2(index
), reg2
);
2678 nw64_mac(BMAC_ALT_ADDR0(index
), reg0
);
2679 nw64_mac(BMAC_ALT_ADDR1(index
), reg1
);
2680 nw64_mac(BMAC_ALT_ADDR2(index
), reg2
);
2686 static int niu_enable_alt_mac(struct niu
*np
, int index
, int on
)
2691 if (index
>= niu_num_alt_addr(np
))
2694 if (np
->flags
& NIU_FLAGS_XMAC
) {
2695 reg
= XMAC_ADDR_CMPEN
;
2698 reg
= BMAC_ADDR_CMPEN
;
2699 mask
= 1 << (index
+ 1);
2702 val
= nr64_mac(reg
);
2712 static void __set_rdc_table_num_hw(struct niu
*np
, unsigned long reg
,
2713 int num
, int mac_pref
)
2715 u64 val
= nr64_mac(reg
);
2716 val
&= ~(HOST_INFO_MACRDCTBLN
| HOST_INFO_MPR
);
2719 val
|= HOST_INFO_MPR
;
2723 static int __set_rdc_table_num(struct niu
*np
,
2724 int xmac_index
, int bmac_index
,
2725 int rdc_table_num
, int mac_pref
)
2729 if (rdc_table_num
& ~HOST_INFO_MACRDCTBLN
)
2731 if (np
->flags
& NIU_FLAGS_XMAC
)
2732 reg
= XMAC_HOST_INFO(xmac_index
);
2734 reg
= BMAC_HOST_INFO(bmac_index
);
2735 __set_rdc_table_num_hw(np
, reg
, rdc_table_num
, mac_pref
);
2739 static int niu_set_primary_mac_rdc_table(struct niu
*np
, int table_num
,
2742 return __set_rdc_table_num(np
, 17, 0, table_num
, mac_pref
);
2745 static int niu_set_multicast_mac_rdc_table(struct niu
*np
, int table_num
,
2748 return __set_rdc_table_num(np
, 16, 8, table_num
, mac_pref
);
2751 static int niu_set_alt_mac_rdc_table(struct niu
*np
, int idx
,
2752 int table_num
, int mac_pref
)
2754 if (idx
>= niu_num_alt_addr(np
))
2756 return __set_rdc_table_num(np
, idx
, idx
+ 1, table_num
, mac_pref
);
static u64 vlan_entry_set_parity(u64 reg_val)
{
	u64 port01_mask;
	u64 port23_mask;

	port01_mask = 0x00ff;
	port23_mask = 0xff00;

	if (hweight64(reg_val & port01_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY0;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY0;

	if (hweight64(reg_val & port23_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY1;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY1;

	return reg_val;
}
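/*
 * Illustrative sketch, not part of the original driver:
 * vlan_entry_set_parity() keeps even parity separately over the port 0/1
 * byte (bits 7:0) and the port 2/3 byte (bits 15:8) of a VLAN table entry;
 * when hweight64() finds an odd number of ones in a half, the matching
 * parity bit is set so data plus parity comes out even.  In the standalone
 * check below __builtin_popcountll() stands in for hweight64() and the
 * parity bit positions are placeholders.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_PARITY0	(1ULL << 16)	/* placeholder bit positions */
#define DEMO_PARITY1	(1ULL << 17)

static uint64_t demo_set_parity(uint64_t reg_val)
{
	if (__builtin_popcountll(reg_val & 0x00ffULL) & 1)
		reg_val |= DEMO_PARITY0;
	else
		reg_val &= ~DEMO_PARITY0;

	if (__builtin_popcountll(reg_val & 0xff00ULL) & 1)
		reg_val |= DEMO_PARITY1;
	else
		reg_val &= ~DEMO_PARITY1;

	return reg_val;
}

int main(void)
{
	/* 0x07 has three ones -> the low-byte parity bit gets set */
	assert(demo_set_parity(0x07) & DEMO_PARITY0);
	/* 0x03 has two ones -> the parity bit stays clear */
	assert(!(demo_set_parity(0x03) & DEMO_PARITY0));
	return 0;
}
#endif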
2780 static void vlan_tbl_write(struct niu
*np
, unsigned long index
,
2781 int port
, int vpr
, int rdc_table
)
2783 u64 reg_val
= nr64(ENET_VLAN_TBL(index
));
2785 reg_val
&= ~((ENET_VLAN_TBL_VPR
|
2786 ENET_VLAN_TBL_VLANRDCTBLN
) <<
2787 ENET_VLAN_TBL_SHIFT(port
));
2789 reg_val
|= (ENET_VLAN_TBL_VPR
<<
2790 ENET_VLAN_TBL_SHIFT(port
));
2791 reg_val
|= (rdc_table
<< ENET_VLAN_TBL_SHIFT(port
));
2793 reg_val
= vlan_entry_set_parity(reg_val
);
2795 nw64(ENET_VLAN_TBL(index
), reg_val
);
2798 static void vlan_tbl_clear(struct niu
*np
)
2802 for (i
= 0; i
< ENET_VLAN_TBL_NUM_ENTRIES
; i
++)
2803 nw64(ENET_VLAN_TBL(i
), 0);
static int tcam_wait_bit(struct niu *np, u64 bit)
{
	int limit = 1000;

	while (--limit > 0) {
		if (nr64(TCAM_CTL) & bit)
			break;
		udelay(1);
	}
	if (limit <= 0)
		return -ENODEV;

	return 0;
}

static int tcam_flush(struct niu *np, int index)
{
	nw64(TCAM_KEY_0, 0x00);
	nw64(TCAM_KEY_MASK_0, 0xff);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
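/*
 * Illustrative sketch, not part of the original driver: tcam_flush()
 * programs a zero key with an all-ones byte-0 mask, kicks a TCAM write for
 * the given index, and then tcam_wait_bit() polls TCAM_CTL until the
 * controller raises the status bit.  The standalone model below shows only
 * the issue-then-poll shape; the demo_* names and bit position are made up.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_TCAM_STAT	(1ULL << 17)	/* placeholder for TCAM_CTL_STAT */

static uint64_t demo_tcam_ctl;

static int demo_tcam_wait_bit(uint64_t bit, int limit)
{
	while (--limit > 0) {
		if (demo_tcam_ctl & bit)
			return 0;
		demo_tcam_ctl |= bit;	/* pretend the hardware completes */
	}
	return -1;
}

static int demo_tcam_flush(int index)
{
	demo_tcam_ctl = (uint64_t)index;	/* "issue" the write op */
	return demo_tcam_wait_bit(DEMO_TCAM_STAT, 1000);
}

int main(void)
{
	printf("flush entry 5: %s\n", demo_tcam_flush(5) ? "timeout" : "ok");
	return 0;
}
#endif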
static int tcam_read(struct niu *np, int index,
		     u64 *key, u64 *mask)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err) {
		key[0] = nr64(TCAM_KEY_0);
		key[1] = nr64(TCAM_KEY_1);
		key[2] = nr64(TCAM_KEY_2);
		key[3] = nr64(TCAM_KEY_3);
		mask[0] = nr64(TCAM_KEY_MASK_0);
		mask[1] = nr64(TCAM_KEY_MASK_1);
		mask[2] = nr64(TCAM_KEY_MASK_2);
		mask[3] = nr64(TCAM_KEY_MASK_3);
	}
	return err;
}

static int tcam_write(struct niu *np, int index,
		      u64 *key, u64 *mask)
{
	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
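/* Illustrative sketch (not part of the driver): tcam_read() and tcam_write()
 * follow the usual indirect-access pattern -- stage the 4-word key and mask
 * in the TCAM_KEY and TCAM_KEY_MASK registers, kick TCAM_CTL with the opcode
 * and index, then poll TCAM_CTL_STAT.  A hypothetical read-modify-write of a
 * single entry, assuming the caller already serializes TCAM access, could be
 * built directly on those helpers:
 */
static int example_tcam_update_key0(struct niu *np, int index,
				    u64 key0, u64 mask0)
{
	u64 key[4], mask[4];
	int err;

	err = tcam_read(np, index, key, mask);
	if (err)
		return err;

	/* Patch only the first key/mask word, keep the other three words. */
	key[0] = key0;
	mask[0] = mask0;

	return tcam_write(np, index, key, mask);
}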
static int tcam_assoc_read(struct niu *np, int index, u64 *data)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err)
		*data = nr64(TCAM_KEY_1);

	return err;
}

static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
{
	nw64(TCAM_KEY_1, assoc_data);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
static void tcam_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_TCAM_DIS;
	else
		val |= FFLP_CFG_1_TCAM_DIS;
	nw64(FFLP_CFG_1, val);
}

static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~(FFLP_CFG_1_FFLPINITDONE |
		 FFLP_CFG_1_CAMLAT |
		 FFLP_CFG_1_CAMRATIO);
	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}
2917 static int tcam_user_eth_class_enable(struct niu
*np
, unsigned long class,
2923 if (class < CLASS_CODE_ETHERTYPE1
||
2924 class > CLASS_CODE_ETHERTYPE2
)
2927 reg
= L2_CLS(class - CLASS_CODE_ETHERTYPE1
);
2939 static int tcam_user_eth_class_set(struct niu
*np
, unsigned long class,
2945 if (class < CLASS_CODE_ETHERTYPE1
||
2946 class > CLASS_CODE_ETHERTYPE2
||
2947 (ether_type
& ~(u64
)0xffff) != 0)
2950 reg
= L2_CLS(class - CLASS_CODE_ETHERTYPE1
);
2952 val
&= ~L2_CLS_ETYPE
;
2953 val
|= (ether_type
<< L2_CLS_ETYPE_SHIFT
);
2960 static int tcam_user_ip_class_enable(struct niu
*np
, unsigned long class,
2966 if (class < CLASS_CODE_USER_PROG1
||
2967 class > CLASS_CODE_USER_PROG4
)
2970 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
2973 val
|= L3_CLS_VALID
;
2975 val
&= ~L3_CLS_VALID
;
2981 static int tcam_user_ip_class_set(struct niu
*np
, unsigned long class,
2982 int ipv6
, u64 protocol_id
,
2983 u64 tos_mask
, u64 tos_val
)
2988 if (class < CLASS_CODE_USER_PROG1
||
2989 class > CLASS_CODE_USER_PROG4
||
2990 (protocol_id
& ~(u64
)0xff) != 0 ||
2991 (tos_mask
& ~(u64
)0xff) != 0 ||
2992 (tos_val
& ~(u64
)0xff) != 0)
2995 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
2997 val
&= ~(L3_CLS_IPVER
| L3_CLS_PID
|
2998 L3_CLS_TOSMASK
| L3_CLS_TOS
);
3000 val
|= L3_CLS_IPVER
;
3001 val
|= (protocol_id
<< L3_CLS_PID_SHIFT
);
3002 val
|= (tos_mask
<< L3_CLS_TOSMASK_SHIFT
);
3003 val
|= (tos_val
<< L3_CLS_TOS_SHIFT
);
3009 static int tcam_early_init(struct niu
*np
)
3015 tcam_set_lat_and_ratio(np
,
3016 DEFAULT_TCAM_LATENCY
,
3017 DEFAULT_TCAM_ACCESS_RATIO
);
3018 for (i
= CLASS_CODE_ETHERTYPE1
; i
<= CLASS_CODE_ETHERTYPE2
; i
++) {
3019 err
= tcam_user_eth_class_enable(np
, i
, 0);
3023 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_USER_PROG4
; i
++) {
3024 err
= tcam_user_ip_class_enable(np
, i
, 0);
3032 static int tcam_flush_all(struct niu
*np
)
3036 for (i
= 0; i
< np
->parent
->tcam_num_entries
; i
++) {
3037 int err
= tcam_flush(np
, i
);
static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
{
	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
}

static int hash_read(struct niu *np, unsigned long partition,
		     unsigned long index, unsigned long num_entries,
		     u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + num_entries > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		data[i] = nr64(HASH_TBL_DATA(partition));

	return 0;
}

static int hash_write(struct niu *np, unsigned long partition,
		      unsigned long index, unsigned long num_entries,
		      u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + (num_entries * 8) > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		nw64(HASH_TBL_DATA(partition), data[i]);

	return 0;
}
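/* Illustrative sketch (not part of the driver): hash_write() programs the
 * FCRAM address register once and then streams num_entries data words, with
 * hash_addr_regval() deciding whether the auto-increment bit is set.  A
 * hypothetical write-then-verify helper for a small burst, built only on the
 * hash_write()/hash_read() helpers above, might look like this:
 */
static int example_hash_write_verify(struct niu *np, unsigned long partition,
				     unsigned long index,
				     unsigned long num_entries, u64 *data)
{
	u64 readback[8];	/* sketch assumes num_entries <= 8 */
	unsigned long i;
	int err;

	if (num_entries > ARRAY_SIZE(readback))
		return -EINVAL;

	err = hash_write(np, partition, index, num_entries, data);
	if (err)
		return err;

	err = hash_read(np, partition, index, num_entries, readback);
	if (err)
		return err;

	/* Any mismatch means the FCRAM burst did not stick. */
	for (i = 0; i < num_entries; i++)
		if (readback[i] != data[i])
			return -EIO;

	return 0;
}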
3087 static void fflp_reset(struct niu
*np
)
3091 nw64(FFLP_CFG_1
, FFLP_CFG_1_PIO_FIO_RST
);
3093 nw64(FFLP_CFG_1
, 0);
3095 val
= FFLP_CFG_1_FCRAMOUTDR_NORMAL
| FFLP_CFG_1_FFLPINITDONE
;
3096 nw64(FFLP_CFG_1
, val
);
3099 static void fflp_set_timings(struct niu
*np
)
3101 u64 val
= nr64(FFLP_CFG_1
);
3103 val
&= ~FFLP_CFG_1_FFLPINITDONE
;
3104 val
|= (DEFAULT_FCRAMRATIO
<< FFLP_CFG_1_FCRAMRATIO_SHIFT
);
3105 nw64(FFLP_CFG_1
, val
);
3107 val
= nr64(FFLP_CFG_1
);
3108 val
|= FFLP_CFG_1_FFLPINITDONE
;
3109 nw64(FFLP_CFG_1
, val
);
3111 val
= nr64(FCRAM_REF_TMR
);
3112 val
&= ~(FCRAM_REF_TMR_MAX
| FCRAM_REF_TMR_MIN
);
3113 val
|= (DEFAULT_FCRAM_REFRESH_MAX
<< FCRAM_REF_TMR_MAX_SHIFT
);
3114 val
|= (DEFAULT_FCRAM_REFRESH_MIN
<< FCRAM_REF_TMR_MIN_SHIFT
);
3115 nw64(FCRAM_REF_TMR
, val
);
3118 static int fflp_set_partition(struct niu
*np
, u64 partition
,
3119 u64 mask
, u64 base
, int enable
)
3124 if (partition
>= FCRAM_NUM_PARTITIONS
||
3125 (mask
& ~(u64
)0x1f) != 0 ||
3126 (base
& ~(u64
)0x1f) != 0)
3129 reg
= FLW_PRT_SEL(partition
);
3132 val
&= ~(FLW_PRT_SEL_EXT
| FLW_PRT_SEL_MASK
| FLW_PRT_SEL_BASE
);
3133 val
|= (mask
<< FLW_PRT_SEL_MASK_SHIFT
);
3134 val
|= (base
<< FLW_PRT_SEL_BASE_SHIFT
);
3136 val
|= FLW_PRT_SEL_EXT
;
3142 static int fflp_disable_all_partitions(struct niu
*np
)
3146 for (i
= 0; i
< FCRAM_NUM_PARTITIONS
; i
++) {
3147 int err
= fflp_set_partition(np
, 0, 0, 0, 0);
3154 static void fflp_llcsnap_enable(struct niu
*np
, int on
)
3156 u64 val
= nr64(FFLP_CFG_1
);
3159 val
|= FFLP_CFG_1_LLCSNAP
;
3161 val
&= ~FFLP_CFG_1_LLCSNAP
;
3162 nw64(FFLP_CFG_1
, val
);
3165 static void fflp_errors_enable(struct niu
*np
, int on
)
3167 u64 val
= nr64(FFLP_CFG_1
);
3170 val
&= ~FFLP_CFG_1_ERRORDIS
;
3172 val
|= FFLP_CFG_1_ERRORDIS
;
3173 nw64(FFLP_CFG_1
, val
);
3176 static int fflp_hash_clear(struct niu
*np
)
3178 struct fcram_hash_ipv4 ent
;
3181 /* IPV4 hash entry with valid bit clear, rest is don't care. */
3182 memset(&ent
, 0, sizeof(ent
));
3183 ent
.header
= HASH_HEADER_EXT
;
3185 for (i
= 0; i
< FCRAM_SIZE
; i
+= sizeof(ent
)) {
3186 int err
= hash_write(np
, 0, i
, 1, (u64
*) &ent
);
3193 static int fflp_early_init(struct niu
*np
)
3195 struct niu_parent
*parent
;
3196 unsigned long flags
;
3199 niu_lock_parent(np
, flags
);
3201 parent
= np
->parent
;
3203 if (!(parent
->flags
& PARENT_FLGS_CLS_HWINIT
)) {
3204 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
3206 fflp_set_timings(np
);
3207 err
= fflp_disable_all_partitions(np
);
3209 netif_printk(np
, probe
, KERN_DEBUG
, np
->dev
,
3210 "fflp_disable_all_partitions failed, err=%d\n",
3216 err
= tcam_early_init(np
);
3218 netif_printk(np
, probe
, KERN_DEBUG
, np
->dev
,
3219 "tcam_early_init failed, err=%d\n", err
);
3222 fflp_llcsnap_enable(np
, 1);
3223 fflp_errors_enable(np
, 0);
3227 err
= tcam_flush_all(np
);
3229 netif_printk(np
, probe
, KERN_DEBUG
, np
->dev
,
3230 "tcam_flush_all failed, err=%d\n", err
);
3233 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
3234 err
= fflp_hash_clear(np
);
3236 netif_printk(np
, probe
, KERN_DEBUG
, np
->dev
,
3237 "fflp_hash_clear failed, err=%d\n",
3245 parent
->flags
|= PARENT_FLGS_CLS_HWINIT
;
3248 niu_unlock_parent(np
, flags
);
3252 static int niu_set_flow_key(struct niu
*np
, unsigned long class_code
, u64 key
)
3254 if (class_code
< CLASS_CODE_USER_PROG1
||
3255 class_code
> CLASS_CODE_SCTP_IPV6
)
3258 nw64(FLOW_KEY(class_code
- CLASS_CODE_USER_PROG1
), key
);
3262 static int niu_set_tcam_key(struct niu
*np
, unsigned long class_code
, u64 key
)
3264 if (class_code
< CLASS_CODE_USER_PROG1
||
3265 class_code
> CLASS_CODE_SCTP_IPV6
)
3268 nw64(TCAM_KEY(class_code
- CLASS_CODE_USER_PROG1
), key
);
/* Entries for the ports are interleaved in the TCAM */
static u16 tcam_get_index(struct niu *np, u16 idx)
{
	/* One entry reserved for IP fragment rule */
	if (idx >= (np->clas.tcam_sz - 1))
		idx = 0;
	return np->clas.tcam_top + ((idx + 1) * np->parent->num_ports);
}

static u16 tcam_get_size(struct niu *np)
{
	/* One entry reserved for IP fragment rule */
	return np->clas.tcam_sz - 1;
}

static u16 tcam_get_valid_entry_cnt(struct niu *np)
{
	/* One entry reserved for IP fragment rule */
	return np->clas.tcam_valid_entries - 1;
}
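/* Illustrative sketch (not part of the driver): because per-port entries are
 * interleaved, logical flow index idx for this port lands at hardware slot
 * tcam_top + (idx + 1) * num_ports, while slot tcam_top itself holds the
 * IP-fragment rule.  For example, with tcam_top = 0 and num_ports = 4,
 * logical entries 0, 1, 2 map to slots 4, 8, 12.  A hypothetical reverse
 * mapping, valid only for slots in this port's stride, could be:
 */
static u16 example_tcam_hw_slot_to_flow_idx(struct niu *np, u16 hw_slot)
{
	u16 stride = np->parent->num_ports;

	/* Undo tcam_get_index(): remove the base, divide out the stride,
	 * and step back past the reserved IP-fragment entry.
	 */
	return ((hw_slot - np->clas.tcam_top) / stride) - 1;
}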
3293 static void niu_rx_skb_append(struct sk_buff
*skb
, struct page
*page
,
3294 u32 offset
, u32 size
)
3296 int i
= skb_shinfo(skb
)->nr_frags
;
3297 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3300 frag
->page_offset
= offset
;
3304 skb
->data_len
+= size
;
3305 skb
->truesize
+= size
;
3307 skb_shinfo(skb
)->nr_frags
= i
+ 1;
static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
{
	a >>= PAGE_SHIFT;
	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));

	return a & (MAX_RBR_RING_SIZE - 1);
}

static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
				    struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			break;
		}
	}

	return p;
}

static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
	unsigned int h = niu_hash_rxaddr(rp, base);

	page->index = base;
	page->mapping = (struct address_space *) rp->rxhash[h];
	rp->rxhash[h] = page;
}
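/* Illustrative sketch (not part of the driver): RX pages are chained into
 * rp->rxhash[] buckets through page->mapping and keyed by the DMA base
 * address kept in page->index.  The RX fast path removes a page through the
 * back-pointer that niu_find_rxpage() fills in; a hypothetical stand-alone
 * unlink helper following the same convention would be:
 */
static struct page *example_niu_unhash_page(struct rx_ring_info *rp, u64 addr)
{
	struct page **link;
	struct page *page = niu_find_rxpage(rp, addr, &link);

	if (page) {
		/* Splice the page out of the singly linked bucket chain. */
		*link = (struct page *) page->mapping;
		page->mapping = NULL;
	}
	return page;
}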
3347 static int niu_rbr_add_page(struct niu
*np
, struct rx_ring_info
*rp
,
3348 gfp_t mask
, int start_index
)
3354 page
= alloc_page(mask
);
3358 addr
= np
->ops
->map_page(np
->device
, page
, 0,
3359 PAGE_SIZE
, DMA_FROM_DEVICE
);
3361 niu_hash_page(rp
, page
, addr
);
3362 if (rp
->rbr_blocks_per_page
> 1)
3363 atomic_add(rp
->rbr_blocks_per_page
- 1,
3364 &compound_head(page
)->_count
);
3366 for (i
= 0; i
< rp
->rbr_blocks_per_page
; i
++) {
3367 __le32
*rbr
= &rp
->rbr
[start_index
+ i
];
3369 *rbr
= cpu_to_le32(addr
>> RBR_DESCR_ADDR_SHIFT
);
3370 addr
+= rp
->rbr_block_size
;
3376 static void niu_rbr_refill(struct niu
*np
, struct rx_ring_info
*rp
, gfp_t mask
)
3378 int index
= rp
->rbr_index
;
3381 if ((rp
->rbr_pending
% rp
->rbr_blocks_per_page
) == 0) {
3382 int err
= niu_rbr_add_page(np
, rp
, mask
, index
);
3384 if (unlikely(err
)) {
3389 rp
->rbr_index
+= rp
->rbr_blocks_per_page
;
3390 BUG_ON(rp
->rbr_index
> rp
->rbr_table_size
);
3391 if (rp
->rbr_index
== rp
->rbr_table_size
)
3394 if (rp
->rbr_pending
>= rp
->rbr_kick_thresh
) {
3395 nw64(RBR_KICK(rp
->rx_channel
), rp
->rbr_pending
);
3396 rp
->rbr_pending
= 0;
3401 static int niu_rx_pkt_ignore(struct niu
*np
, struct rx_ring_info
*rp
)
3403 unsigned int index
= rp
->rcr_index
;
3408 struct page
*page
, **link
;
3414 val
= le64_to_cpup(&rp
->rcr
[index
]);
3415 addr
= (val
& RCR_ENTRY_PKT_BUF_ADDR
) <<
3416 RCR_ENTRY_PKT_BUF_ADDR_SHIFT
;
3417 page
= niu_find_rxpage(rp
, addr
, &link
);
3419 rcr_size
= rp
->rbr_sizes
[(val
& RCR_ENTRY_PKTBUFSZ
) >>
3420 RCR_ENTRY_PKTBUFSZ_SHIFT
];
3421 if ((page
->index
+ PAGE_SIZE
) - rcr_size
== addr
) {
3422 *link
= (struct page
*) page
->mapping
;
3423 np
->ops
->unmap_page(np
->device
, page
->index
,
3424 PAGE_SIZE
, DMA_FROM_DEVICE
);
3426 page
->mapping
= NULL
;
3428 rp
->rbr_refill_pending
++;
3431 index
= NEXT_RCR(rp
, index
);
3432 if (!(val
& RCR_ENTRY_MULTI
))
3436 rp
->rcr_index
= index
;
3441 static int niu_process_rx_pkt(struct napi_struct
*napi
, struct niu
*np
,
3442 struct rx_ring_info
*rp
)
3444 unsigned int index
= rp
->rcr_index
;
3445 struct rx_pkt_hdr1
*rh
;
3446 struct sk_buff
*skb
;
3449 skb
= netdev_alloc_skb(np
->dev
, RX_SKB_ALLOC_SIZE
);
3451 return niu_rx_pkt_ignore(np
, rp
);
3455 struct page
*page
, **link
;
3456 u32 rcr_size
, append_size
;
3461 val
= le64_to_cpup(&rp
->rcr
[index
]);
3463 len
= (val
& RCR_ENTRY_L2_LEN
) >>
3464 RCR_ENTRY_L2_LEN_SHIFT
;
3467 addr
= (val
& RCR_ENTRY_PKT_BUF_ADDR
) <<
3468 RCR_ENTRY_PKT_BUF_ADDR_SHIFT
;
3469 page
= niu_find_rxpage(rp
, addr
, &link
);
3471 rcr_size
= rp
->rbr_sizes
[(val
& RCR_ENTRY_PKTBUFSZ
) >>
3472 RCR_ENTRY_PKTBUFSZ_SHIFT
];
3474 off
= addr
& ~PAGE_MASK
;
3475 append_size
= rcr_size
;
3479 ptype
= (val
>> RCR_ENTRY_PKT_TYPE_SHIFT
);
3480 if ((ptype
== RCR_PKT_TYPE_TCP
||
3481 ptype
== RCR_PKT_TYPE_UDP
) &&
3482 !(val
& (RCR_ENTRY_NOPORT
|
3484 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3486 skb_checksum_none_assert(skb
);
3487 } else if (!(val
& RCR_ENTRY_MULTI
))
3488 append_size
= len
- skb
->len
;
3490 niu_rx_skb_append(skb
, page
, off
, append_size
);
3491 if ((page
->index
+ rp
->rbr_block_size
) - rcr_size
== addr
) {
3492 *link
= (struct page
*) page
->mapping
;
3493 np
->ops
->unmap_page(np
->device
, page
->index
,
3494 PAGE_SIZE
, DMA_FROM_DEVICE
);
3496 page
->mapping
= NULL
;
3497 rp
->rbr_refill_pending
++;
3501 index
= NEXT_RCR(rp
, index
);
3502 if (!(val
& RCR_ENTRY_MULTI
))
3506 rp
->rcr_index
= index
;
3509 len
= min_t(int, len
, sizeof(*rh
) + VLAN_ETH_HLEN
);
3510 __pskb_pull_tail(skb
, len
);
3512 rh
= (struct rx_pkt_hdr1
*) skb
->data
;
3513 if (np
->dev
->features
& NETIF_F_RXHASH
)
3514 skb
->rxhash
= ((u32
)rh
->hashval2_0
<< 24 |
3515 (u32
)rh
->hashval2_1
<< 16 |
3516 (u32
)rh
->hashval1_1
<< 8 |
3517 (u32
)rh
->hashval1_2
<< 0);
3518 skb_pull(skb
, sizeof(*rh
));
3521 rp
->rx_bytes
+= skb
->len
;
3523 skb
->protocol
= eth_type_trans(skb
, np
->dev
);
3524 skb_record_rx_queue(skb
, rp
->rx_channel
);
3525 napi_gro_receive(napi
, skb
);
3530 static int niu_rbr_fill(struct niu
*np
, struct rx_ring_info
*rp
, gfp_t mask
)
3532 int blocks_per_page
= rp
->rbr_blocks_per_page
;
3533 int err
, index
= rp
->rbr_index
;
3536 while (index
< (rp
->rbr_table_size
- blocks_per_page
)) {
3537 err
= niu_rbr_add_page(np
, rp
, mask
, index
);
3541 index
+= blocks_per_page
;
3544 rp
->rbr_index
= index
;
3548 static void niu_rbr_free(struct niu
*np
, struct rx_ring_info
*rp
)
3552 for (i
= 0; i
< MAX_RBR_RING_SIZE
; i
++) {
3555 page
= rp
->rxhash
[i
];
3557 struct page
*next
= (struct page
*) page
->mapping
;
3558 u64 base
= page
->index
;
3560 np
->ops
->unmap_page(np
->device
, base
, PAGE_SIZE
,
3563 page
->mapping
= NULL
;
3571 for (i
= 0; i
< rp
->rbr_table_size
; i
++)
3572 rp
->rbr
[i
] = cpu_to_le32(0);
3576 static int release_tx_packet(struct niu
*np
, struct tx_ring_info
*rp
, int idx
)
3578 struct tx_buff_info
*tb
= &rp
->tx_buffs
[idx
];
3579 struct sk_buff
*skb
= tb
->skb
;
3580 struct tx_pkt_hdr
*tp
;
3584 tp
= (struct tx_pkt_hdr
*) skb
->data
;
3585 tx_flags
= le64_to_cpup(&tp
->flags
);
3588 rp
->tx_bytes
+= (((tx_flags
& TXHDR_LEN
) >> TXHDR_LEN_SHIFT
) -
3589 ((tx_flags
& TXHDR_PAD
) / 2));
3591 len
= skb_headlen(skb
);
3592 np
->ops
->unmap_single(np
->device
, tb
->mapping
,
3593 len
, DMA_TO_DEVICE
);
3595 if (le64_to_cpu(rp
->descr
[idx
]) & TX_DESC_MARK
)
3600 idx
= NEXT_TX(rp
, idx
);
3601 len
-= MAX_TX_DESC_LEN
;
3604 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3605 tb
= &rp
->tx_buffs
[idx
];
3606 BUG_ON(tb
->skb
!= NULL
);
3607 np
->ops
->unmap_page(np
->device
, tb
->mapping
,
3608 skb_shinfo(skb
)->frags
[i
].size
,
3610 idx
= NEXT_TX(rp
, idx
);
3618 #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
3620 static void niu_tx_work(struct niu
*np
, struct tx_ring_info
*rp
)
3622 struct netdev_queue
*txq
;
3627 index
= (rp
- np
->tx_rings
);
3628 txq
= netdev_get_tx_queue(np
->dev
, index
);
3631 if (unlikely(!(cs
& (TX_CS_MK
| TX_CS_MMK
))))
3634 tmp
= pkt_cnt
= (cs
& TX_CS_PKT_CNT
) >> TX_CS_PKT_CNT_SHIFT
;
3635 pkt_cnt
= (pkt_cnt
- rp
->last_pkt_cnt
) &
3636 (TX_CS_PKT_CNT
>> TX_CS_PKT_CNT_SHIFT
);
3638 rp
->last_pkt_cnt
= tmp
;
3642 netif_printk(np
, tx_done
, KERN_DEBUG
, np
->dev
,
3643 "%s() pkt_cnt[%u] cons[%d]\n", __func__
, pkt_cnt
, cons
);
3646 cons
= release_tx_packet(np
, rp
, cons
);
3652 if (unlikely(netif_tx_queue_stopped(txq
) &&
3653 (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
)))) {
3654 __netif_tx_lock(txq
, smp_processor_id());
3655 if (netif_tx_queue_stopped(txq
) &&
3656 (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
)))
3657 netif_tx_wake_queue(txq
);
3658 __netif_tx_unlock(txq
);
static inline void niu_sync_rx_discard_stats(struct niu *np,
					     struct rx_ring_info *rp,
					     const int limit)
{
	/* This elaborate scheme is needed for reading the RX discard
	 * counters, as they are only 16-bit and can overflow quickly,
	 * and because the overflow indication bit is not usable as
	 * the counter value does not wrap, but remains at max value
	 * 0xFFFF.
	 *
	 * In theory and in practice counters can be lost in between
	 * reading nr64() and clearing the counter nw64().  For this
	 * reason, the number of counter clearings nw64() is
	 * limited/reduced through the limit parameter.
	 */
	int rx_channel = rp->rx_channel;
	u32 misc, wred;

	/* RXMISC (Receive Miscellaneous Discard Count) covers the
	 * following discard events: IPP (Input Port Process),
	 * FFLP/TCAM, full RCR (Receive Completion Ring), and an empty
	 * RBR (Receive Block Ring) prefetch buffer.
	 */
	misc = nr64(RXMISC(rx_channel));
	if (unlikely((misc & RXMISC_COUNT) > limit)) {
		nw64(RXMISC(rx_channel), 0);
		rp->rx_errors += misc & RXMISC_COUNT;

		if (unlikely(misc & RXMISC_OFLOW))
			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
				rx_channel);

		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
			     "rx-%d: MISC drop=%u over=%u\n",
			     rx_channel, misc, misc - limit);
	}

	/* WRED (Weighted Random Early Discard) by hardware */
	wred = nr64(RED_DIS_CNT(rx_channel));
	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
		nw64(RED_DIS_CNT(rx_channel), 0);
		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;

		if (unlikely(wred & RED_DIS_CNT_OFLOW))
			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n",
				rx_channel);

		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
			     "rx-%d: WRED drop=%u over=%u\n",
			     rx_channel, wred, wred - limit);
	}
}
3714 static int niu_rx_work(struct napi_struct
*napi
, struct niu
*np
,
3715 struct rx_ring_info
*rp
, int budget
)
3717 int qlen
, rcr_done
= 0, work_done
= 0;
3718 struct rxdma_mailbox
*mbox
= rp
->mbox
;
3722 stat
= nr64(RX_DMA_CTL_STAT(rp
->rx_channel
));
3723 qlen
= nr64(RCRSTAT_A(rp
->rx_channel
)) & RCRSTAT_A_QLEN
;
3725 stat
= le64_to_cpup(&mbox
->rx_dma_ctl_stat
);
3726 qlen
= (le64_to_cpup(&mbox
->rcrstat_a
) & RCRSTAT_A_QLEN
);
3728 mbox
->rx_dma_ctl_stat
= 0;
3729 mbox
->rcrstat_a
= 0;
3731 netif_printk(np
, rx_status
, KERN_DEBUG
, np
->dev
,
3732 "%s(chan[%d]), stat[%llx] qlen=%d\n",
3733 __func__
, rp
->rx_channel
, (unsigned long long)stat
, qlen
);
3735 rcr_done
= work_done
= 0;
3736 qlen
= min(qlen
, budget
);
3737 while (work_done
< qlen
) {
3738 rcr_done
+= niu_process_rx_pkt(napi
, np
, rp
);
3742 if (rp
->rbr_refill_pending
>= rp
->rbr_kick_thresh
) {
3745 for (i
= 0; i
< rp
->rbr_refill_pending
; i
++)
3746 niu_rbr_refill(np
, rp
, GFP_ATOMIC
);
3747 rp
->rbr_refill_pending
= 0;
3750 stat
= (RX_DMA_CTL_STAT_MEX
|
3751 ((u64
)work_done
<< RX_DMA_CTL_STAT_PKTREAD_SHIFT
) |
3752 ((u64
)rcr_done
<< RX_DMA_CTL_STAT_PTRREAD_SHIFT
));
3754 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
), stat
);
	/* Only sync discard stats when qlen indicates potential for drops */
3758 niu_sync_rx_discard_stats(np
, rp
, 0x7FFF);
3763 static int niu_poll_core(struct niu
*np
, struct niu_ldg
*lp
, int budget
)
3766 u32 tx_vec
= (v0
>> 32);
3767 u32 rx_vec
= (v0
& 0xffffffff);
3768 int i
, work_done
= 0;
3770 netif_printk(np
, intr
, KERN_DEBUG
, np
->dev
,
3771 "%s() v0[%016llx]\n", __func__
, (unsigned long long)v0
);
3773 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3774 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3775 if (tx_vec
& (1 << rp
->tx_channel
))
3776 niu_tx_work(np
, rp
);
3777 nw64(LD_IM0(LDN_TXDMA(rp
->tx_channel
)), 0);
3780 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3781 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3783 if (rx_vec
& (1 << rp
->rx_channel
)) {
3786 this_work_done
= niu_rx_work(&lp
->napi
, np
, rp
,
3789 budget
-= this_work_done
;
3790 work_done
+= this_work_done
;
3792 nw64(LD_IM0(LDN_RXDMA(rp
->rx_channel
)), 0);
3798 static int niu_poll(struct napi_struct
*napi
, int budget
)
3800 struct niu_ldg
*lp
= container_of(napi
, struct niu_ldg
, napi
);
3801 struct niu
*np
= lp
->np
;
3804 work_done
= niu_poll_core(np
, lp
, budget
);
3806 if (work_done
< budget
) {
3807 napi_complete(napi
);
3808 niu_ldg_rearm(np
, lp
, 1);
3813 static void niu_log_rxchan_errors(struct niu
*np
, struct rx_ring_info
*rp
,
3816 netdev_err(np
->dev
, "RX channel %u errors ( ", rp
->rx_channel
);
3818 if (stat
& RX_DMA_CTL_STAT_RBR_TMOUT
)
3819 pr_cont("RBR_TMOUT ");
3820 if (stat
& RX_DMA_CTL_STAT_RSP_CNT_ERR
)
3821 pr_cont("RSP_CNT ");
3822 if (stat
& RX_DMA_CTL_STAT_BYTE_EN_BUS
)
3823 pr_cont("BYTE_EN_BUS ");
3824 if (stat
& RX_DMA_CTL_STAT_RSP_DAT_ERR
)
3825 pr_cont("RSP_DAT ");
3826 if (stat
& RX_DMA_CTL_STAT_RCR_ACK_ERR
)
3827 pr_cont("RCR_ACK ");
3828 if (stat
& RX_DMA_CTL_STAT_RCR_SHA_PAR
)
3829 pr_cont("RCR_SHA_PAR ");
3830 if (stat
& RX_DMA_CTL_STAT_RBR_PRE_PAR
)
3831 pr_cont("RBR_PRE_PAR ");
3832 if (stat
& RX_DMA_CTL_STAT_CONFIG_ERR
)
3834 if (stat
& RX_DMA_CTL_STAT_RCRINCON
)
3835 pr_cont("RCRINCON ");
3836 if (stat
& RX_DMA_CTL_STAT_RCRFULL
)
3837 pr_cont("RCRFULL ");
3838 if (stat
& RX_DMA_CTL_STAT_RBRFULL
)
3839 pr_cont("RBRFULL ");
3840 if (stat
& RX_DMA_CTL_STAT_RBRLOGPAGE
)
3841 pr_cont("RBRLOGPAGE ");
3842 if (stat
& RX_DMA_CTL_STAT_CFIGLOGPAGE
)
3843 pr_cont("CFIGLOGPAGE ");
	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
		pr_cont("DC_FIFO ");
3850 static int niu_rx_error(struct niu
*np
, struct rx_ring_info
*rp
)
3852 u64 stat
= nr64(RX_DMA_CTL_STAT(rp
->rx_channel
));
3856 if (stat
& (RX_DMA_CTL_STAT_CHAN_FATAL
|
3857 RX_DMA_CTL_STAT_PORT_FATAL
))
3861 netdev_err(np
->dev
, "RX channel %u error, stat[%llx]\n",
3863 (unsigned long long) stat
);
3865 niu_log_rxchan_errors(np
, rp
, stat
);
3868 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
3869 stat
& RX_DMA_CTL_WRITE_CLEAR_ERRS
);
3874 static void niu_log_txchan_errors(struct niu
*np
, struct tx_ring_info
*rp
,
3877 netdev_err(np
->dev
, "TX channel %u errors ( ", rp
->tx_channel
);
3879 if (cs
& TX_CS_MBOX_ERR
)
3881 if (cs
& TX_CS_PKT_SIZE_ERR
)
3882 pr_cont("PKT_SIZE ");
3883 if (cs
& TX_CS_TX_RING_OFLOW
)
3884 pr_cont("TX_RING_OFLOW ");
3885 if (cs
& TX_CS_PREF_BUF_PAR_ERR
)
3886 pr_cont("PREF_BUF_PAR ");
3887 if (cs
& TX_CS_NACK_PREF
)
3888 pr_cont("NACK_PREF ");
3889 if (cs
& TX_CS_NACK_PKT_RD
)
3890 pr_cont("NACK_PKT_RD ");
3891 if (cs
& TX_CS_CONF_PART_ERR
)
3892 pr_cont("CONF_PART ");
3893 if (cs
& TX_CS_PKT_PRT_ERR
)
3894 pr_cont("PKT_PTR ");
3899 static int niu_tx_error(struct niu
*np
, struct tx_ring_info
*rp
)
3903 cs
= nr64(TX_CS(rp
->tx_channel
));
3904 logh
= nr64(TX_RNG_ERR_LOGH(rp
->tx_channel
));
3905 logl
= nr64(TX_RNG_ERR_LOGL(rp
->tx_channel
));
3907 netdev_err(np
->dev
, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3909 (unsigned long long)cs
,
3910 (unsigned long long)logh
,
3911 (unsigned long long)logl
);
3913 niu_log_txchan_errors(np
, rp
, cs
);
3918 static int niu_mif_interrupt(struct niu
*np
)
3920 u64 mif_status
= nr64(MIF_STATUS
);
3923 if (np
->flags
& NIU_FLAGS_XMAC
) {
3924 u64 xrxmac_stat
= nr64_mac(XRXMAC_STATUS
);
3926 if (xrxmac_stat
& XRXMAC_STATUS_PHY_MDINT
)
3930 netdev_err(np
->dev
, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3931 (unsigned long long)mif_status
, phy_mdint
);
3936 static void niu_xmac_interrupt(struct niu
*np
)
3938 struct niu_xmac_stats
*mp
= &np
->mac_stats
.xmac
;
3941 val
= nr64_mac(XTXMAC_STATUS
);
3942 if (val
& XTXMAC_STATUS_FRAME_CNT_EXP
)
3943 mp
->tx_frames
+= TXMAC_FRM_CNT_COUNT
;
3944 if (val
& XTXMAC_STATUS_BYTE_CNT_EXP
)
3945 mp
->tx_bytes
+= TXMAC_BYTE_CNT_COUNT
;
3946 if (val
& XTXMAC_STATUS_TXFIFO_XFR_ERR
)
3947 mp
->tx_fifo_errors
++;
3948 if (val
& XTXMAC_STATUS_TXMAC_OFLOW
)
3949 mp
->tx_overflow_errors
++;
3950 if (val
& XTXMAC_STATUS_MAX_PSIZE_ERR
)
3951 mp
->tx_max_pkt_size_errors
++;
3952 if (val
& XTXMAC_STATUS_TXMAC_UFLOW
)
3953 mp
->tx_underflow_errors
++;
3955 val
= nr64_mac(XRXMAC_STATUS
);
3956 if (val
& XRXMAC_STATUS_LCL_FLT_STATUS
)
3957 mp
->rx_local_faults
++;
3958 if (val
& XRXMAC_STATUS_RFLT_DET
)
3959 mp
->rx_remote_faults
++;
3960 if (val
& XRXMAC_STATUS_LFLT_CNT_EXP
)
3961 mp
->rx_link_faults
+= LINK_FAULT_CNT_COUNT
;
3962 if (val
& XRXMAC_STATUS_ALIGNERR_CNT_EXP
)
3963 mp
->rx_align_errors
+= RXMAC_ALIGN_ERR_CNT_COUNT
;
3964 if (val
& XRXMAC_STATUS_RXFRAG_CNT_EXP
)
3965 mp
->rx_frags
+= RXMAC_FRAG_CNT_COUNT
;
3966 if (val
& XRXMAC_STATUS_RXMULTF_CNT_EXP
)
3967 mp
->rx_mcasts
+= RXMAC_MC_FRM_CNT_COUNT
;
3968 if (val
& XRXMAC_STATUS_RXBCAST_CNT_EXP
)
3969 mp
->rx_bcasts
+= RXMAC_BC_FRM_CNT_COUNT
;
3972 if (val
& XRXMAC_STATUS_RXHIST1_CNT_EXP
)
3973 mp
->rx_hist_cnt1
+= RXMAC_HIST_CNT1_COUNT
;
3974 if (val
& XRXMAC_STATUS_RXHIST2_CNT_EXP
)
3975 mp
->rx_hist_cnt2
+= RXMAC_HIST_CNT2_COUNT
;
3976 if (val
& XRXMAC_STATUS_RXHIST3_CNT_EXP
)
3977 mp
->rx_hist_cnt3
+= RXMAC_HIST_CNT3_COUNT
;
3978 if (val
& XRXMAC_STATUS_RXHIST4_CNT_EXP
)
3979 mp
->rx_hist_cnt4
+= RXMAC_HIST_CNT4_COUNT
;
3980 if (val
& XRXMAC_STATUS_RXHIST5_CNT_EXP
)
3981 mp
->rx_hist_cnt5
+= RXMAC_HIST_CNT5_COUNT
;
3982 if (val
& XRXMAC_STATUS_RXHIST6_CNT_EXP
)
3983 mp
->rx_hist_cnt6
+= RXMAC_HIST_CNT6_COUNT
;
3984 if (val
& XRXMAC_STATUS_RXHIST7_CNT_EXP
)
3985 mp
->rx_hist_cnt7
+= RXMAC_HIST_CNT7_COUNT
;
3986 if (val
& XRXMAC_STATUS_RXOCTET_CNT_EXP
)
3987 mp
->rx_octets
+= RXMAC_BT_CNT_COUNT
;
3988 if (val
& XRXMAC_STATUS_CVIOLERR_CNT_EXP
)
3989 mp
->rx_code_violations
+= RXMAC_CD_VIO_CNT_COUNT
;
3990 if (val
& XRXMAC_STATUS_LENERR_CNT_EXP
)
3991 mp
->rx_len_errors
+= RXMAC_MPSZER_CNT_COUNT
;
3992 if (val
& XRXMAC_STATUS_CRCERR_CNT_EXP
)
3993 mp
->rx_crc_errors
+= RXMAC_CRC_ER_CNT_COUNT
;
3994 if (val
& XRXMAC_STATUS_RXUFLOW
)
3995 mp
->rx_underflows
++;
3996 if (val
& XRXMAC_STATUS_RXOFLOW
)
3999 val
= nr64_mac(XMAC_FC_STAT
);
4000 if (val
& XMAC_FC_STAT_TX_MAC_NPAUSE
)
4001 mp
->pause_off_state
++;
4002 if (val
& XMAC_FC_STAT_TX_MAC_PAUSE
)
4003 mp
->pause_on_state
++;
4004 if (val
& XMAC_FC_STAT_RX_MAC_RPAUSE
)
4005 mp
->pause_received
++;
4008 static void niu_bmac_interrupt(struct niu
*np
)
4010 struct niu_bmac_stats
*mp
= &np
->mac_stats
.bmac
;
4013 val
= nr64_mac(BTXMAC_STATUS
);
4014 if (val
& BTXMAC_STATUS_UNDERRUN
)
4015 mp
->tx_underflow_errors
++;
4016 if (val
& BTXMAC_STATUS_MAX_PKT_ERR
)
4017 mp
->tx_max_pkt_size_errors
++;
4018 if (val
& BTXMAC_STATUS_BYTE_CNT_EXP
)
4019 mp
->tx_bytes
+= BTXMAC_BYTE_CNT_COUNT
;
4020 if (val
& BTXMAC_STATUS_FRAME_CNT_EXP
)
4021 mp
->tx_frames
+= BTXMAC_FRM_CNT_COUNT
;
4023 val
= nr64_mac(BRXMAC_STATUS
);
4024 if (val
& BRXMAC_STATUS_OVERFLOW
)
4026 if (val
& BRXMAC_STATUS_FRAME_CNT_EXP
)
4027 mp
->rx_frames
+= BRXMAC_FRAME_CNT_COUNT
;
4028 if (val
& BRXMAC_STATUS_ALIGN_ERR_EXP
)
4029 mp
->rx_align_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
4030 if (val
& BRXMAC_STATUS_CRC_ERR_EXP
)
4031 mp
->rx_crc_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
4032 if (val
& BRXMAC_STATUS_LEN_ERR_EXP
)
4033 mp
->rx_len_errors
+= BRXMAC_CODE_VIOL_ERR_CNT_COUNT
;
4035 val
= nr64_mac(BMAC_CTRL_STATUS
);
4036 if (val
& BMAC_CTRL_STATUS_NOPAUSE
)
4037 mp
->pause_off_state
++;
4038 if (val
& BMAC_CTRL_STATUS_PAUSE
)
4039 mp
->pause_on_state
++;
4040 if (val
& BMAC_CTRL_STATUS_PAUSE_RECV
)
4041 mp
->pause_received
++;
4044 static int niu_mac_interrupt(struct niu
*np
)
4046 if (np
->flags
& NIU_FLAGS_XMAC
)
4047 niu_xmac_interrupt(np
);
4049 niu_bmac_interrupt(np
);
4054 static void niu_log_device_error(struct niu
*np
, u64 stat
)
4056 netdev_err(np
->dev
, "Core device errors ( ");
4058 if (stat
& SYS_ERR_MASK_META2
)
4060 if (stat
& SYS_ERR_MASK_META1
)
4062 if (stat
& SYS_ERR_MASK_PEU
)
4064 if (stat
& SYS_ERR_MASK_TXC
)
4066 if (stat
& SYS_ERR_MASK_RDMC
)
4068 if (stat
& SYS_ERR_MASK_TDMC
)
4070 if (stat
& SYS_ERR_MASK_ZCP
)
4072 if (stat
& SYS_ERR_MASK_FFLP
)
4074 if (stat
& SYS_ERR_MASK_IPP
)
4076 if (stat
& SYS_ERR_MASK_MAC
)
4078 if (stat
& SYS_ERR_MASK_SMX
)
4084 static int niu_device_error(struct niu
*np
)
4086 u64 stat
= nr64(SYS_ERR_STAT
);
4088 netdev_err(np
->dev
, "Core device error, stat[%llx]\n",
4089 (unsigned long long)stat
);
4091 niu_log_device_error(np
, stat
);
4096 static int niu_slowpath_interrupt(struct niu
*np
, struct niu_ldg
*lp
,
4097 u64 v0
, u64 v1
, u64 v2
)
4106 if (v1
& 0x00000000ffffffffULL
) {
4107 u32 rx_vec
= (v1
& 0xffffffff);
4109 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4110 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4112 if (rx_vec
& (1 << rp
->rx_channel
)) {
4113 int r
= niu_rx_error(np
, rp
);
4118 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
4119 RX_DMA_CTL_STAT_MEX
);
4124 if (v1
& 0x7fffffff00000000ULL
) {
4125 u32 tx_vec
= (v1
>> 32) & 0x7fffffff;
4127 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4128 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4130 if (tx_vec
& (1 << rp
->tx_channel
)) {
4131 int r
= niu_tx_error(np
, rp
);
4137 if ((v0
| v1
) & 0x8000000000000000ULL
) {
4138 int r
= niu_mif_interrupt(np
);
4144 int r
= niu_mac_interrupt(np
);
4149 int r
= niu_device_error(np
);
4156 niu_enable_interrupts(np
, 0);
4161 static void niu_rxchan_intr(struct niu
*np
, struct rx_ring_info
*rp
,
4164 struct rxdma_mailbox
*mbox
= rp
->mbox
;
4165 u64 stat_write
, stat
= le64_to_cpup(&mbox
->rx_dma_ctl_stat
);
4167 stat_write
= (RX_DMA_CTL_STAT_RCRTHRES
|
4168 RX_DMA_CTL_STAT_RCRTO
);
4169 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
), stat_write
);
4171 netif_printk(np
, intr
, KERN_DEBUG
, np
->dev
,
4172 "%s() stat[%llx]\n", __func__
, (unsigned long long)stat
);
4175 static void niu_txchan_intr(struct niu
*np
, struct tx_ring_info
*rp
,
4178 rp
->tx_cs
= nr64(TX_CS(rp
->tx_channel
));
4180 netif_printk(np
, intr
, KERN_DEBUG
, np
->dev
,
4181 "%s() cs[%llx]\n", __func__
, (unsigned long long)rp
->tx_cs
);
4184 static void __niu_fastpath_interrupt(struct niu
*np
, int ldg
, u64 v0
)
4186 struct niu_parent
*parent
= np
->parent
;
4190 tx_vec
= (v0
>> 32);
4191 rx_vec
= (v0
& 0xffffffff);
4193 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4194 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4195 int ldn
= LDN_RXDMA(rp
->rx_channel
);
4197 if (parent
->ldg_map
[ldn
] != ldg
)
4200 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
4201 if (rx_vec
& (1 << rp
->rx_channel
))
4202 niu_rxchan_intr(np
, rp
, ldn
);
4205 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4206 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4207 int ldn
= LDN_TXDMA(rp
->tx_channel
);
4209 if (parent
->ldg_map
[ldn
] != ldg
)
4212 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
4213 if (tx_vec
& (1 << rp
->tx_channel
))
4214 niu_txchan_intr(np
, rp
, ldn
);
4218 static void niu_schedule_napi(struct niu
*np
, struct niu_ldg
*lp
,
4219 u64 v0
, u64 v1
, u64 v2
)
4221 if (likely(napi_schedule_prep(&lp
->napi
))) {
4225 __niu_fastpath_interrupt(np
, lp
->ldg_num
, v0
);
4226 __napi_schedule(&lp
->napi
);
4230 static irqreturn_t
niu_interrupt(int irq
, void *dev_id
)
4232 struct niu_ldg
*lp
= dev_id
;
4233 struct niu
*np
= lp
->np
;
4234 int ldg
= lp
->ldg_num
;
4235 unsigned long flags
;
4238 if (netif_msg_intr(np
))
4239 printk(KERN_DEBUG KBUILD_MODNAME
": " "%s() ldg[%p](%d)",
4242 spin_lock_irqsave(&np
->lock
, flags
);
4244 v0
= nr64(LDSV0(ldg
));
4245 v1
= nr64(LDSV1(ldg
));
4246 v2
= nr64(LDSV2(ldg
));
4248 if (netif_msg_intr(np
))
4249 pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
4250 (unsigned long long) v0
,
4251 (unsigned long long) v1
,
4252 (unsigned long long) v2
);
4254 if (unlikely(!v0
&& !v1
&& !v2
)) {
4255 spin_unlock_irqrestore(&np
->lock
, flags
);
4259 if (unlikely((v0
& ((u64
)1 << LDN_MIF
)) || v1
|| v2
)) {
4260 int err
= niu_slowpath_interrupt(np
, lp
, v0
, v1
, v2
);
4264 if (likely(v0
& ~((u64
)1 << LDN_MIF
)))
4265 niu_schedule_napi(np
, lp
, v0
, v1
, v2
);
4267 niu_ldg_rearm(np
, lp
, 1);
4269 spin_unlock_irqrestore(&np
->lock
, flags
);
4274 static void niu_free_rx_ring_info(struct niu
*np
, struct rx_ring_info
*rp
)
4277 np
->ops
->free_coherent(np
->device
,
4278 sizeof(struct rxdma_mailbox
),
4279 rp
->mbox
, rp
->mbox_dma
);
4283 np
->ops
->free_coherent(np
->device
,
4284 MAX_RCR_RING_SIZE
* sizeof(__le64
),
4285 rp
->rcr
, rp
->rcr_dma
);
4287 rp
->rcr_table_size
= 0;
4291 niu_rbr_free(np
, rp
);
4293 np
->ops
->free_coherent(np
->device
,
4294 MAX_RBR_RING_SIZE
* sizeof(__le32
),
4295 rp
->rbr
, rp
->rbr_dma
);
4297 rp
->rbr_table_size
= 0;
4304 static void niu_free_tx_ring_info(struct niu
*np
, struct tx_ring_info
*rp
)
4307 np
->ops
->free_coherent(np
->device
,
4308 sizeof(struct txdma_mailbox
),
4309 rp
->mbox
, rp
->mbox_dma
);
4315 for (i
= 0; i
< MAX_TX_RING_SIZE
; i
++) {
4316 if (rp
->tx_buffs
[i
].skb
)
4317 (void) release_tx_packet(np
, rp
, i
);
4320 np
->ops
->free_coherent(np
->device
,
4321 MAX_TX_RING_SIZE
* sizeof(__le64
),
4322 rp
->descr
, rp
->descr_dma
);
4331 static void niu_free_channels(struct niu
*np
)
4336 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4337 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4339 niu_free_rx_ring_info(np
, rp
);
4341 kfree(np
->rx_rings
);
4342 np
->rx_rings
= NULL
;
4343 np
->num_rx_rings
= 0;
4347 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4348 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4350 niu_free_tx_ring_info(np
, rp
);
4352 kfree(np
->tx_rings
);
4353 np
->tx_rings
= NULL
;
4354 np
->num_tx_rings
= 0;
4358 static int niu_alloc_rx_ring_info(struct niu
*np
,
4359 struct rx_ring_info
*rp
)
4361 BUILD_BUG_ON(sizeof(struct rxdma_mailbox
) != 64);
4363 rp
->rxhash
= kzalloc(MAX_RBR_RING_SIZE
* sizeof(struct page
*),
4368 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
4369 sizeof(struct rxdma_mailbox
),
4370 &rp
->mbox_dma
, GFP_KERNEL
);
4373 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
4374 netdev_err(np
->dev
, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
4379 rp
->rcr
= np
->ops
->alloc_coherent(np
->device
,
4380 MAX_RCR_RING_SIZE
* sizeof(__le64
),
4381 &rp
->rcr_dma
, GFP_KERNEL
);
4384 if ((unsigned long)rp
->rcr
& (64UL - 1)) {
4385 netdev_err(np
->dev
, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
4389 rp
->rcr_table_size
= MAX_RCR_RING_SIZE
;
4392 rp
->rbr
= np
->ops
->alloc_coherent(np
->device
,
4393 MAX_RBR_RING_SIZE
* sizeof(__le32
),
4394 &rp
->rbr_dma
, GFP_KERNEL
);
4397 if ((unsigned long)rp
->rbr
& (64UL - 1)) {
4398 netdev_err(np
->dev
, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
4402 rp
->rbr_table_size
= MAX_RBR_RING_SIZE
;
4404 rp
->rbr_pending
= 0;
static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
{
	int mtu = np->dev->mtu;

	/* These values are recommended by the HW designers for fair
	 * utilization of DRR amongst the rings.
	 */
	rp->max_burst = mtu + 32;
	if (rp->max_burst > 4096)
		rp->max_burst = 4096;
}
4421 static int niu_alloc_tx_ring_info(struct niu
*np
,
4422 struct tx_ring_info
*rp
)
4424 BUILD_BUG_ON(sizeof(struct txdma_mailbox
) != 64);
4426 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
4427 sizeof(struct txdma_mailbox
),
4428 &rp
->mbox_dma
, GFP_KERNEL
);
4431 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
4432 netdev_err(np
->dev
, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
4437 rp
->descr
= np
->ops
->alloc_coherent(np
->device
,
4438 MAX_TX_RING_SIZE
* sizeof(__le64
),
4439 &rp
->descr_dma
, GFP_KERNEL
);
4442 if ((unsigned long)rp
->descr
& (64UL - 1)) {
4443 netdev_err(np
->dev
, "Coherent alloc gives misaligned TXDMA descr table %p\n",
4448 rp
->pending
= MAX_TX_RING_SIZE
;
4453 /* XXX make these configurable... XXX */
4454 rp
->mark_freq
= rp
->pending
/ 4;
4456 niu_set_max_burst(np
, rp
);
4461 static void niu_size_rbr(struct niu
*np
, struct rx_ring_info
*rp
)
4465 bss
= min(PAGE_SHIFT
, 15);
4467 rp
->rbr_block_size
= 1 << bss
;
4468 rp
->rbr_blocks_per_page
= 1 << (PAGE_SHIFT
-bss
);
4470 rp
->rbr_sizes
[0] = 256;
4471 rp
->rbr_sizes
[1] = 1024;
4472 if (np
->dev
->mtu
> ETH_DATA_LEN
) {
4473 switch (PAGE_SIZE
) {
4475 rp
->rbr_sizes
[2] = 4096;
4479 rp
->rbr_sizes
[2] = 8192;
4483 rp
->rbr_sizes
[2] = 2048;
4485 rp
->rbr_sizes
[3] = rp
->rbr_block_size
;
4488 static int niu_alloc_channels(struct niu
*np
)
4490 struct niu_parent
*parent
= np
->parent
;
4491 int first_rx_channel
, first_tx_channel
;
4492 int num_rx_rings
, num_tx_rings
;
4493 struct rx_ring_info
*rx_rings
;
4494 struct tx_ring_info
*tx_rings
;
4498 first_rx_channel
= first_tx_channel
= 0;
4499 for (i
= 0; i
< port
; i
++) {
4500 first_rx_channel
+= parent
->rxchan_per_port
[i
];
4501 first_tx_channel
+= parent
->txchan_per_port
[i
];
4504 num_rx_rings
= parent
->rxchan_per_port
[port
];
4505 num_tx_rings
= parent
->txchan_per_port
[port
];
4507 rx_rings
= kcalloc(num_rx_rings
, sizeof(struct rx_ring_info
),
4513 np
->num_rx_rings
= num_rx_rings
;
4515 np
->rx_rings
= rx_rings
;
4517 netif_set_real_num_rx_queues(np
->dev
, num_rx_rings
);
4519 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4520 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4523 rp
->rx_channel
= first_rx_channel
+ i
;
4525 err
= niu_alloc_rx_ring_info(np
, rp
);
4529 niu_size_rbr(np
, rp
);
4531 /* XXX better defaults, configurable, etc... XXX */
4532 rp
->nonsyn_window
= 64;
4533 rp
->nonsyn_threshold
= rp
->rcr_table_size
- 64;
4534 rp
->syn_window
= 64;
4535 rp
->syn_threshold
= rp
->rcr_table_size
- 64;
4536 rp
->rcr_pkt_threshold
= 16;
4537 rp
->rcr_timeout
= 8;
4538 rp
->rbr_kick_thresh
= RBR_REFILL_MIN
;
4539 if (rp
->rbr_kick_thresh
< rp
->rbr_blocks_per_page
)
4540 rp
->rbr_kick_thresh
= rp
->rbr_blocks_per_page
;
4542 err
= niu_rbr_fill(np
, rp
, GFP_KERNEL
);
4547 tx_rings
= kcalloc(num_tx_rings
, sizeof(struct tx_ring_info
),
4553 np
->num_tx_rings
= num_tx_rings
;
4555 np
->tx_rings
= tx_rings
;
4557 netif_set_real_num_tx_queues(np
->dev
, num_tx_rings
);
4559 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4560 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4563 rp
->tx_channel
= first_tx_channel
+ i
;
4565 err
= niu_alloc_tx_ring_info(np
, rp
);
4573 niu_free_channels(np
);
4577 static int niu_tx_cs_sng_poll(struct niu
*np
, int channel
)
4581 while (--limit
> 0) {
4582 u64 val
= nr64(TX_CS(channel
));
4583 if (val
& TX_CS_SNG_STATE
)
4589 static int niu_tx_channel_stop(struct niu
*np
, int channel
)
4591 u64 val
= nr64(TX_CS(channel
));
4593 val
|= TX_CS_STOP_N_GO
;
4594 nw64(TX_CS(channel
), val
);
4596 return niu_tx_cs_sng_poll(np
, channel
);
4599 static int niu_tx_cs_reset_poll(struct niu
*np
, int channel
)
4603 while (--limit
> 0) {
4604 u64 val
= nr64(TX_CS(channel
));
4605 if (!(val
& TX_CS_RST
))
4611 static int niu_tx_channel_reset(struct niu
*np
, int channel
)
4613 u64 val
= nr64(TX_CS(channel
));
4617 nw64(TX_CS(channel
), val
);
4619 err
= niu_tx_cs_reset_poll(np
, channel
);
4621 nw64(TX_RING_KICK(channel
), 0);
4626 static int niu_tx_channel_lpage_init(struct niu
*np
, int channel
)
4630 nw64(TX_LOG_MASK1(channel
), 0);
4631 nw64(TX_LOG_VAL1(channel
), 0);
4632 nw64(TX_LOG_MASK2(channel
), 0);
4633 nw64(TX_LOG_VAL2(channel
), 0);
4634 nw64(TX_LOG_PAGE_RELO1(channel
), 0);
4635 nw64(TX_LOG_PAGE_RELO2(channel
), 0);
4636 nw64(TX_LOG_PAGE_HDL(channel
), 0);
4638 val
= (u64
)np
->port
<< TX_LOG_PAGE_VLD_FUNC_SHIFT
;
4639 val
|= (TX_LOG_PAGE_VLD_PAGE0
| TX_LOG_PAGE_VLD_PAGE1
);
4640 nw64(TX_LOG_PAGE_VLD(channel
), val
);
4642 /* XXX TXDMA 32bit mode? XXX */
4647 static void niu_txc_enable_port(struct niu
*np
, int on
)
4649 unsigned long flags
;
4652 niu_lock_parent(np
, flags
);
4653 val
= nr64(TXC_CONTROL
);
4654 mask
= (u64
)1 << np
->port
;
4656 val
|= TXC_CONTROL_ENABLE
| mask
;
4659 if ((val
& ~TXC_CONTROL_ENABLE
) == 0)
4660 val
&= ~TXC_CONTROL_ENABLE
;
4662 nw64(TXC_CONTROL
, val
);
4663 niu_unlock_parent(np
, flags
);
4666 static void niu_txc_set_imask(struct niu
*np
, u64 imask
)
4668 unsigned long flags
;
4671 niu_lock_parent(np
, flags
);
4672 val
= nr64(TXC_INT_MASK
);
4673 val
&= ~TXC_INT_MASK_VAL(np
->port
);
4674 val
|= (imask
<< TXC_INT_MASK_VAL_SHIFT(np
->port
));
4675 niu_unlock_parent(np
, flags
);
4678 static void niu_txc_port_dma_enable(struct niu
*np
, int on
)
4685 for (i
= 0; i
< np
->num_tx_rings
; i
++)
4686 val
|= (1 << np
->tx_rings
[i
].tx_channel
);
4688 nw64(TXC_PORT_DMA(np
->port
), val
);
4691 static int niu_init_one_tx_channel(struct niu
*np
, struct tx_ring_info
*rp
)
4693 int err
, channel
= rp
->tx_channel
;
4696 err
= niu_tx_channel_stop(np
, channel
);
4700 err
= niu_tx_channel_reset(np
, channel
);
4704 err
= niu_tx_channel_lpage_init(np
, channel
);
4708 nw64(TXC_DMA_MAX(channel
), rp
->max_burst
);
4709 nw64(TX_ENT_MSK(channel
), 0);
4711 if (rp
->descr_dma
& ~(TX_RNG_CFIG_STADDR_BASE
|
4712 TX_RNG_CFIG_STADDR
)) {
4713 netdev_err(np
->dev
, "TX ring channel %d DMA addr (%llx) is not aligned\n",
4714 channel
, (unsigned long long)rp
->descr_dma
);
	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks.  rp->pending is the number of TX descriptors in
	 * our ring, 8 bytes each, thus we divide by 8 bytes more
	 * to get the proper value the chip wants.
	 */
	ring_len = (rp->pending / 8);
4725 val
= ((ring_len
<< TX_RNG_CFIG_LEN_SHIFT
) |
4727 nw64(TX_RNG_CFIG(channel
), val
);
4729 if (((rp
->mbox_dma
>> 32) & ~TXDMA_MBH_MBADDR
) ||
4730 ((u32
)rp
->mbox_dma
& ~TXDMA_MBL_MBADDR
)) {
4731 netdev_err(np
->dev
, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4732 channel
, (unsigned long long)rp
->mbox_dma
);
4735 nw64(TXDMA_MBH(channel
), rp
->mbox_dma
>> 32);
4736 nw64(TXDMA_MBL(channel
), rp
->mbox_dma
& TXDMA_MBL_MBADDR
);
4738 nw64(TX_CS(channel
), 0);
4740 rp
->last_pkt_cnt
= 0;
4745 static void niu_init_rdc_groups(struct niu
*np
)
4747 struct niu_rdc_tables
*tp
= &np
->parent
->rdc_group_cfg
[np
->port
];
4748 int i
, first_table_num
= tp
->first_table_num
;
4750 for (i
= 0; i
< tp
->num_tables
; i
++) {
4751 struct rdc_table
*tbl
= &tp
->tables
[i
];
4752 int this_table
= first_table_num
+ i
;
4755 for (slot
= 0; slot
< NIU_RDC_TABLE_SLOTS
; slot
++)
4756 nw64(RDC_TBL(this_table
, slot
),
4757 tbl
->rxdma_channel
[slot
]);
4760 nw64(DEF_RDC(np
->port
), np
->parent
->rdc_default
[np
->port
]);
4763 static void niu_init_drr_weight(struct niu
*np
)
4765 int type
= phy_decode(np
->parent
->port_phy
, np
->port
);
4770 val
= PT_DRR_WEIGHT_DEFAULT_10G
;
4775 val
= PT_DRR_WEIGHT_DEFAULT_1G
;
4778 nw64(PT_DRR_WT(np
->port
), val
);
4781 static int niu_init_hostinfo(struct niu
*np
)
4783 struct niu_parent
*parent
= np
->parent
;
4784 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[np
->port
];
4785 int i
, err
, num_alt
= niu_num_alt_addr(np
);
4786 int first_rdc_table
= tp
->first_table_num
;
4788 err
= niu_set_primary_mac_rdc_table(np
, first_rdc_table
, 1);
4792 err
= niu_set_multicast_mac_rdc_table(np
, first_rdc_table
, 1);
4796 for (i
= 0; i
< num_alt
; i
++) {
4797 err
= niu_set_alt_mac_rdc_table(np
, i
, first_rdc_table
, 1);
4805 static int niu_rx_channel_reset(struct niu
*np
, int channel
)
4807 return niu_set_and_wait_clear(np
, RXDMA_CFIG1(channel
),
4808 RXDMA_CFIG1_RST
, 1000, 10,
4812 static int niu_rx_channel_lpage_init(struct niu
*np
, int channel
)
4816 nw64(RX_LOG_MASK1(channel
), 0);
4817 nw64(RX_LOG_VAL1(channel
), 0);
4818 nw64(RX_LOG_MASK2(channel
), 0);
4819 nw64(RX_LOG_VAL2(channel
), 0);
4820 nw64(RX_LOG_PAGE_RELO1(channel
), 0);
4821 nw64(RX_LOG_PAGE_RELO2(channel
), 0);
4822 nw64(RX_LOG_PAGE_HDL(channel
), 0);
4824 val
= (u64
)np
->port
<< RX_LOG_PAGE_VLD_FUNC_SHIFT
;
4825 val
|= (RX_LOG_PAGE_VLD_PAGE0
| RX_LOG_PAGE_VLD_PAGE1
);
4826 nw64(RX_LOG_PAGE_VLD(channel
), val
);
4831 static void niu_rx_channel_wred_init(struct niu
*np
, struct rx_ring_info
*rp
)
4835 val
= (((u64
)rp
->nonsyn_window
<< RDC_RED_PARA_WIN_SHIFT
) |
4836 ((u64
)rp
->nonsyn_threshold
<< RDC_RED_PARA_THRE_SHIFT
) |
4837 ((u64
)rp
->syn_window
<< RDC_RED_PARA_WIN_SYN_SHIFT
) |
4838 ((u64
)rp
->syn_threshold
<< RDC_RED_PARA_THRE_SYN_SHIFT
));
4839 nw64(RDC_RED_PARA(rp
->rx_channel
), val
);
4842 static int niu_compute_rbr_cfig_b(struct rx_ring_info
*rp
, u64
*ret
)
4847 switch (rp
->rbr_block_size
) {
4849 val
|= (RBR_BLKSIZE_4K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4852 val
|= (RBR_BLKSIZE_8K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4855 val
|= (RBR_BLKSIZE_16K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4858 val
|= (RBR_BLKSIZE_32K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4863 val
|= RBR_CFIG_B_VLD2
;
4864 switch (rp
->rbr_sizes
[2]) {
4866 val
|= (RBR_BUFSZ2_2K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4869 val
|= (RBR_BUFSZ2_4K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4872 val
|= (RBR_BUFSZ2_8K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4875 val
|= (RBR_BUFSZ2_16K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4881 val
|= RBR_CFIG_B_VLD1
;
4882 switch (rp
->rbr_sizes
[1]) {
4884 val
|= (RBR_BUFSZ1_1K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4887 val
|= (RBR_BUFSZ1_2K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4890 val
|= (RBR_BUFSZ1_4K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4893 val
|= (RBR_BUFSZ1_8K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4899 val
|= RBR_CFIG_B_VLD0
;
4900 switch (rp
->rbr_sizes
[0]) {
4902 val
|= (RBR_BUFSZ0_256
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4905 val
|= (RBR_BUFSZ0_512
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4908 val
|= (RBR_BUFSZ0_1K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4911 val
|= (RBR_BUFSZ0_2K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4922 static int niu_enable_rx_channel(struct niu
*np
, int channel
, int on
)
4924 u64 val
= nr64(RXDMA_CFIG1(channel
));
4928 val
|= RXDMA_CFIG1_EN
;
4930 val
&= ~RXDMA_CFIG1_EN
;
4931 nw64(RXDMA_CFIG1(channel
), val
);
4934 while (--limit
> 0) {
4935 if (nr64(RXDMA_CFIG1(channel
)) & RXDMA_CFIG1_QST
)
4944 static int niu_init_one_rx_channel(struct niu
*np
, struct rx_ring_info
*rp
)
4946 int err
, channel
= rp
->rx_channel
;
4949 err
= niu_rx_channel_reset(np
, channel
);
4953 err
= niu_rx_channel_lpage_init(np
, channel
);
4957 niu_rx_channel_wred_init(np
, rp
);
4959 nw64(RX_DMA_ENT_MSK(channel
), RX_DMA_ENT_MSK_RBR_EMPTY
);
4960 nw64(RX_DMA_CTL_STAT(channel
),
4961 (RX_DMA_CTL_STAT_MEX
|
4962 RX_DMA_CTL_STAT_RCRTHRES
|
4963 RX_DMA_CTL_STAT_RCRTO
|
4964 RX_DMA_CTL_STAT_RBR_EMPTY
));
4965 nw64(RXDMA_CFIG1(channel
), rp
->mbox_dma
>> 32);
4966 nw64(RXDMA_CFIG2(channel
),
4967 ((rp
->mbox_dma
& RXDMA_CFIG2_MBADDR_L
) |
4968 RXDMA_CFIG2_FULL_HDR
));
4969 nw64(RBR_CFIG_A(channel
),
4970 ((u64
)rp
->rbr_table_size
<< RBR_CFIG_A_LEN_SHIFT
) |
4971 (rp
->rbr_dma
& (RBR_CFIG_A_STADDR_BASE
| RBR_CFIG_A_STADDR
)));
4972 err
= niu_compute_rbr_cfig_b(rp
, &val
);
4975 nw64(RBR_CFIG_B(channel
), val
);
4976 nw64(RCRCFIG_A(channel
),
4977 ((u64
)rp
->rcr_table_size
<< RCRCFIG_A_LEN_SHIFT
) |
4978 (rp
->rcr_dma
& (RCRCFIG_A_STADDR_BASE
| RCRCFIG_A_STADDR
)));
4979 nw64(RCRCFIG_B(channel
),
4980 ((u64
)rp
->rcr_pkt_threshold
<< RCRCFIG_B_PTHRES_SHIFT
) |
4982 ((u64
)rp
->rcr_timeout
<< RCRCFIG_B_TIMEOUT_SHIFT
));
4984 err
= niu_enable_rx_channel(np
, channel
, 1);
4988 nw64(RBR_KICK(channel
), rp
->rbr_index
);
4990 val
= nr64(RX_DMA_CTL_STAT(channel
));
4991 val
|= RX_DMA_CTL_STAT_RBR_EMPTY
;
4992 nw64(RX_DMA_CTL_STAT(channel
), val
);
4997 static int niu_init_rx_channels(struct niu
*np
)
4999 unsigned long flags
;
5000 u64 seed
= jiffies_64
;
5003 niu_lock_parent(np
, flags
);
5004 nw64(RX_DMA_CK_DIV
, np
->parent
->rxdma_clock_divider
);
5005 nw64(RED_RAN_INIT
, RED_RAN_INIT_OPMODE
| (seed
& RED_RAN_INIT_VAL
));
5006 niu_unlock_parent(np
, flags
);
5008 /* XXX RXDMA 32bit mode? XXX */
5010 niu_init_rdc_groups(np
);
5011 niu_init_drr_weight(np
);
5013 err
= niu_init_hostinfo(np
);
5017 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
5018 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
5020 err
= niu_init_one_rx_channel(np
, rp
);
5028 static int niu_set_ip_frag_rule(struct niu
*np
)
5030 struct niu_parent
*parent
= np
->parent
;
5031 struct niu_classifier
*cp
= &np
->clas
;
5032 struct niu_tcam_entry
*tp
;
5035 index
= cp
->tcam_top
;
5036 tp
= &parent
->tcam
[index
];
5038 /* Note that the noport bit is the same in both ipv4 and
5039 * ipv6 format TCAM entries.
5041 memset(tp
, 0, sizeof(*tp
));
5042 tp
->key
[1] = TCAM_V4KEY1_NOPORT
;
5043 tp
->key_mask
[1] = TCAM_V4KEY1_NOPORT
;
5044 tp
->assoc_data
= (TCAM_ASSOCDATA_TRES_USE_OFFSET
|
5045 ((u64
)0 << TCAM_ASSOCDATA_OFFSET_SHIFT
));
5046 err
= tcam_write(np
, index
, tp
->key
, tp
->key_mask
);
5049 err
= tcam_assoc_write(np
, index
, tp
->assoc_data
);
5053 cp
->tcam_valid_entries
++;
5058 static int niu_init_classifier_hw(struct niu
*np
)
5060 struct niu_parent
*parent
= np
->parent
;
5061 struct niu_classifier
*cp
= &np
->clas
;
5064 nw64(H1POLY
, cp
->h1_init
);
5065 nw64(H2POLY
, cp
->h2_init
);
5067 err
= niu_init_hostinfo(np
);
5071 for (i
= 0; i
< ENET_VLAN_TBL_NUM_ENTRIES
; i
++) {
5072 struct niu_vlan_rdc
*vp
= &cp
->vlan_mappings
[i
];
5074 vlan_tbl_write(np
, i
, np
->port
,
5075 vp
->vlan_pref
, vp
->rdc_num
);
5078 for (i
= 0; i
< cp
->num_alt_mac_mappings
; i
++) {
5079 struct niu_altmac_rdc
*ap
= &cp
->alt_mac_mappings
[i
];
5081 err
= niu_set_alt_mac_rdc_table(np
, ap
->alt_mac_num
,
5082 ap
->rdc_num
, ap
->mac_pref
);
5087 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_SCTP_IPV6
; i
++) {
5088 int index
= i
- CLASS_CODE_USER_PROG1
;
5090 err
= niu_set_tcam_key(np
, i
, parent
->tcam_key
[index
]);
5093 err
= niu_set_flow_key(np
, i
, parent
->flow_key
[index
]);
5098 err
= niu_set_ip_frag_rule(np
);
5107 static int niu_zcp_write(struct niu
*np
, int index
, u64
*data
)
5109 nw64(ZCP_RAM_DATA0
, data
[0]);
5110 nw64(ZCP_RAM_DATA1
, data
[1]);
5111 nw64(ZCP_RAM_DATA2
, data
[2]);
5112 nw64(ZCP_RAM_DATA3
, data
[3]);
5113 nw64(ZCP_RAM_DATA4
, data
[4]);
5114 nw64(ZCP_RAM_BE
, ZCP_RAM_BE_VAL
);
5116 (ZCP_RAM_ACC_WRITE
|
5117 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
5118 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
5120 return niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
5124 static int niu_zcp_read(struct niu
*np
, int index
, u64
*data
)
5128 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
5131 netdev_err(np
->dev
, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5132 (unsigned long long)nr64(ZCP_RAM_ACC
));
5138 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
5139 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
5141 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
5144 netdev_err(np
->dev
, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5145 (unsigned long long)nr64(ZCP_RAM_ACC
));
5149 data
[0] = nr64(ZCP_RAM_DATA0
);
5150 data
[1] = nr64(ZCP_RAM_DATA1
);
5151 data
[2] = nr64(ZCP_RAM_DATA2
);
5152 data
[3] = nr64(ZCP_RAM_DATA3
);
5153 data
[4] = nr64(ZCP_RAM_DATA4
);
5158 static void niu_zcp_cfifo_reset(struct niu
*np
)
5160 u64 val
= nr64(RESET_CFIFO
);
5162 val
|= RESET_CFIFO_RST(np
->port
);
5163 nw64(RESET_CFIFO
, val
);
5166 val
&= ~RESET_CFIFO_RST(np
->port
);
5167 nw64(RESET_CFIFO
, val
);
5170 static int niu_init_zcp(struct niu
*np
)
5172 u64 data
[5], rbuf
[5];
5175 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
5176 if (np
->port
== 0 || np
->port
== 1)
5177 max
= ATLAS_P0_P1_CFIFO_ENTRIES
;
5179 max
= ATLAS_P2_P3_CFIFO_ENTRIES
;
5181 max
= NIU_CFIFO_ENTRIES
;
5189 for (i
= 0; i
< max
; i
++) {
5190 err
= niu_zcp_write(np
, i
, data
);
5193 err
= niu_zcp_read(np
, i
, rbuf
);
5198 niu_zcp_cfifo_reset(np
);
5199 nw64(CFIFO_ECC(np
->port
), 0);
5200 nw64(ZCP_INT_STAT
, ZCP_INT_STAT_ALL
);
5201 (void) nr64(ZCP_INT_STAT
);
5202 nw64(ZCP_INT_MASK
, ZCP_INT_MASK_ALL
);
5207 static void niu_ipp_write(struct niu
*np
, int index
, u64
*data
)
5209 u64 val
= nr64_ipp(IPP_CFIG
);
5211 nw64_ipp(IPP_CFIG
, val
| IPP_CFIG_DFIFO_PIO_W
);
5212 nw64_ipp(IPP_DFIFO_WR_PTR
, index
);
5213 nw64_ipp(IPP_DFIFO_WR0
, data
[0]);
5214 nw64_ipp(IPP_DFIFO_WR1
, data
[1]);
5215 nw64_ipp(IPP_DFIFO_WR2
, data
[2]);
5216 nw64_ipp(IPP_DFIFO_WR3
, data
[3]);
5217 nw64_ipp(IPP_DFIFO_WR4
, data
[4]);
5218 nw64_ipp(IPP_CFIG
, val
& ~IPP_CFIG_DFIFO_PIO_W
);
5221 static void niu_ipp_read(struct niu
*np
, int index
, u64
*data
)
5223 nw64_ipp(IPP_DFIFO_RD_PTR
, index
);
5224 data
[0] = nr64_ipp(IPP_DFIFO_RD0
);
5225 data
[1] = nr64_ipp(IPP_DFIFO_RD1
);
5226 data
[2] = nr64_ipp(IPP_DFIFO_RD2
);
5227 data
[3] = nr64_ipp(IPP_DFIFO_RD3
);
5228 data
[4] = nr64_ipp(IPP_DFIFO_RD4
);
5231 static int niu_ipp_reset(struct niu
*np
)
5233 return niu_set_and_wait_clear_ipp(np
, IPP_CFIG
, IPP_CFIG_SOFT_RST
,
5234 1000, 100, "IPP_CFIG");
5237 static int niu_init_ipp(struct niu
*np
)
5239 u64 data
[5], rbuf
[5], val
;
5242 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
5243 if (np
->port
== 0 || np
->port
== 1)
5244 max
= ATLAS_P0_P1_DFIFO_ENTRIES
;
5246 max
= ATLAS_P2_P3_DFIFO_ENTRIES
;
5248 max
= NIU_DFIFO_ENTRIES
;
5256 for (i
= 0; i
< max
; i
++) {
5257 niu_ipp_write(np
, i
, data
);
5258 niu_ipp_read(np
, i
, rbuf
);
5261 (void) nr64_ipp(IPP_INT_STAT
);
5262 (void) nr64_ipp(IPP_INT_STAT
);
5264 err
= niu_ipp_reset(np
);
5268 (void) nr64_ipp(IPP_PKT_DIS
);
5269 (void) nr64_ipp(IPP_BAD_CS_CNT
);
5270 (void) nr64_ipp(IPP_ECC
);
5272 (void) nr64_ipp(IPP_INT_STAT
);
5274 nw64_ipp(IPP_MSK
, ~IPP_MSK_ALL
);
5276 val
= nr64_ipp(IPP_CFIG
);
5277 val
&= ~IPP_CFIG_IP_MAX_PKT
;
5278 val
|= (IPP_CFIG_IPP_ENABLE
|
5279 IPP_CFIG_DFIFO_ECC_EN
|
5280 IPP_CFIG_DROP_BAD_CRC
|
5282 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT
));
5283 nw64_ipp(IPP_CFIG
, val
);
5288 static void niu_handle_led(struct niu
*np
, int status
)
5291 val
= nr64_mac(XMAC_CONFIG
);
5293 if ((np
->flags
& NIU_FLAGS_10G
) != 0 &&
5294 (np
->flags
& NIU_FLAGS_FIBER
) != 0) {
5296 val
|= XMAC_CONFIG_LED_POLARITY
;
5297 val
&= ~XMAC_CONFIG_FORCE_LED_ON
;
5299 val
|= XMAC_CONFIG_FORCE_LED_ON
;
5300 val
&= ~XMAC_CONFIG_LED_POLARITY
;
5304 nw64_mac(XMAC_CONFIG
, val
);
5307 static void niu_init_xif_xmac(struct niu
*np
)
5309 struct niu_link_config
*lp
= &np
->link_config
;
5312 if (np
->flags
& NIU_FLAGS_XCVR_SERDES
) {
5313 val
= nr64(MIF_CONFIG
);
5314 val
|= MIF_CONFIG_ATCA_GE
;
5315 nw64(MIF_CONFIG
, val
);
5318 val
= nr64_mac(XMAC_CONFIG
);
5319 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
5321 val
|= XMAC_CONFIG_TX_OUTPUT_EN
;
5323 if (lp
->loopback_mode
== LOOPBACK_MAC
) {
5324 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
5325 val
|= XMAC_CONFIG_LOOPBACK
;
5327 val
&= ~XMAC_CONFIG_LOOPBACK
;
5330 if (np
->flags
& NIU_FLAGS_10G
) {
5331 val
&= ~XMAC_CONFIG_LFS_DISABLE
;
5333 val
|= XMAC_CONFIG_LFS_DISABLE
;
5334 if (!(np
->flags
& NIU_FLAGS_FIBER
) &&
5335 !(np
->flags
& NIU_FLAGS_XCVR_SERDES
))
5336 val
|= XMAC_CONFIG_1G_PCS_BYPASS
;
5338 val
&= ~XMAC_CONFIG_1G_PCS_BYPASS
;
5341 val
&= ~XMAC_CONFIG_10G_XPCS_BYPASS
;
5343 if (lp
->active_speed
== SPEED_100
)
5344 val
|= XMAC_CONFIG_SEL_CLK_25MHZ
;
5346 val
&= ~XMAC_CONFIG_SEL_CLK_25MHZ
;
5348 nw64_mac(XMAC_CONFIG
, val
);
5350 val
= nr64_mac(XMAC_CONFIG
);
5351 val
&= ~XMAC_CONFIG_MODE_MASK
;
5352 if (np
->flags
& NIU_FLAGS_10G
) {
5353 val
|= XMAC_CONFIG_MODE_XGMII
;
5355 if (lp
->active_speed
== SPEED_1000
)
5356 val
|= XMAC_CONFIG_MODE_GMII
;
5358 val
|= XMAC_CONFIG_MODE_MII
;
5361 nw64_mac(XMAC_CONFIG
, val
);
5364 static void niu_init_xif_bmac(struct niu
*np
)
5366 struct niu_link_config
*lp
= &np
->link_config
;
5369 val
= BMAC_XIF_CONFIG_TX_OUTPUT_EN
;
5371 if (lp
->loopback_mode
== LOOPBACK_MAC
)
5372 val
|= BMAC_XIF_CONFIG_MII_LOOPBACK
;
5374 val
&= ~BMAC_XIF_CONFIG_MII_LOOPBACK
;
5376 if (lp
->active_speed
== SPEED_1000
)
5377 val
|= BMAC_XIF_CONFIG_GMII_MODE
;
5379 val
&= ~BMAC_XIF_CONFIG_GMII_MODE
;
5381 val
&= ~(BMAC_XIF_CONFIG_LINK_LED
|
5382 BMAC_XIF_CONFIG_LED_POLARITY
);
5384 if (!(np
->flags
& NIU_FLAGS_10G
) &&
5385 !(np
->flags
& NIU_FLAGS_FIBER
) &&
5386 lp
->active_speed
== SPEED_100
)
5387 val
|= BMAC_XIF_CONFIG_25MHZ_CLOCK
;
5389 val
&= ~BMAC_XIF_CONFIG_25MHZ_CLOCK
;
5391 nw64_mac(BMAC_XIF_CONFIG
, val
);
5394 static void niu_init_xif(struct niu
*np
)
5396 if (np
->flags
& NIU_FLAGS_XMAC
)
5397 niu_init_xif_xmac(np
);
5399 niu_init_xif_bmac(np
);
5402 static void niu_pcs_mii_reset(struct niu
*np
)
5405 u64 val
= nr64_pcs(PCS_MII_CTL
);
5406 val
|= PCS_MII_CTL_RST
;
5407 nw64_pcs(PCS_MII_CTL
, val
);
5408 while ((--limit
>= 0) && (val
& PCS_MII_CTL_RST
)) {
5410 val
= nr64_pcs(PCS_MII_CTL
);
5414 static void niu_xpcs_reset(struct niu
*np
)
5417 u64 val
= nr64_xpcs(XPCS_CONTROL1
);
5418 val
|= XPCS_CONTROL1_RESET
;
5419 nw64_xpcs(XPCS_CONTROL1
, val
);
5420 while ((--limit
>= 0) && (val
& XPCS_CONTROL1_RESET
)) {
5422 val
= nr64_xpcs(XPCS_CONTROL1
);
static int niu_init_pcs(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	switch (np->flags & (NIU_FLAGS_10G |
			     NIU_FLAGS_FIBER |
			     NIU_FLAGS_XCVR_SERDES)) {
	case NIU_FLAGS_FIBER:
		/* 1G fiber */
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		niu_pcs_mii_reset(np);
		break;

	case NIU_FLAGS_10G:
	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		/* 10G SERDES */
		if (!(np->flags & NIU_FLAGS_XMAC))
			return -EINVAL;

		/* 10G copper or fiber */
		val = nr64_mac(XMAC_CONFIG);
		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
		nw64_mac(XMAC_CONFIG, val);

		niu_xpcs_reset(np);

		val = nr64_xpcs(XPCS_CONTROL1);
		if (lp->loopback_mode == LOOPBACK_PHY)
			val |= XPCS_CONTROL1_LOOPBACK;
		else
			val &= ~XPCS_CONTROL1_LOOPBACK;
		nw64_xpcs(XPCS_CONTROL1, val);

		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
		break;

	case NIU_FLAGS_XCVR_SERDES:
		/* 1G SERDES */
		niu_pcs_mii_reset(np);
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		break;

	case 0:
		/* 1G copper */
	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		/* 1G RGMII FIBER */
		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
		niu_pcs_mii_reset(np);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int niu_reset_tx_xmac(struct niu *np)
{
	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
					  (XTXMAC_SW_RST_REG_RS |
					   XTXMAC_SW_RST_SOFT_RST),
					  1000, 100, "XTXMAC_SW_RST");
}

static int niu_reset_tx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_tx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_tx_xmac(np);
	else
		return niu_reset_tx_bmac(np);
}
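/* Program minimum/maximum frame sizes and inter-packet gap, and leave the
 * transmitter disabled; niu_enable_tx_mac() turns it on later.
 */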
static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	val = nr64_mac(XMAC_MIN);
	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
		 XMAC_MIN_RX_MIN_PKT_SIZE);
	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
	nw64_mac(XMAC_MIN, val);

	nw64_mac(XMAC_MAX, max);

	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);

	val = nr64_mac(XMAC_IPG);
	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_IPG_IPG_XGMII;
		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
	} else {
		val &= ~XMAC_IPG_IPG_MII_GMII;
		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
	}
	nw64_mac(XMAC_IPG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
		 XMAC_CONFIG_STRETCH_MODE |
		 XMAC_CONFIG_VAR_MIN_IPG_EN |
		 XMAC_CONFIG_TX_ENABLE);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(TXMAC_FRM_CNT, 0);
	nw64_mac(TXMAC_BYTE_CNT, 0);
}

static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	nw64_mac(BMAC_MIN_FRAME, min);
	nw64_mac(BMAC_MAX_FRAME, max);

	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
	nw64_mac(BMAC_PREAMBLE_SIZE, 7);

	val = nr64_mac(BTXMAC_CONFIG);
	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
		 BTXMAC_CONFIG_ENABLE);
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_init_tx_mac(struct niu *np)
{
	u64 min, max;

	min = 64;
	if (np->dev->mtu > ETH_DATA_LEN)
		max = 9216;
	else
		max = 1522;

	/* The XMAC_MIN register only accepts values for TX min which
	 * have the low 3 bits cleared.
	 */
	BUG_ON(min & 0x7);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_tx_xmac(np, min, max);
	else
		niu_init_tx_bmac(np, min, max);
}
static int niu_reset_rx_xmac(struct niu *np)
{
	int limit;

	nw64_mac(XRXMAC_SW_RST,
		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
						 XRXMAC_SW_RST_SOFT_RST)))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_rx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_rx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_rx_xmac(np);
	else
		return niu_reset_rx_bmac(np);
}
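/* Clear the address filters and hash table, bind the port to its RDC
 * group, and zero the RX MAC counters.  The receiver itself stays
 * disabled until niu_enable_rx_mac() is called.
 */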
static void niu_init_rx_xmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(XMAC_ADD_FILT0, 0);
	nw64_mac(XMAC_ADD_FILT1, 0);
	nw64_mac(XMAC_ADD_FILT2, 0);
	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(XMAC_HASH_TBL(i), 0);
	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
		 XMAC_CONFIG_PROMISCUOUS |
		 XMAC_CONFIG_PROMISC_GROUP |
		 XMAC_CONFIG_ERR_CHK_DIS |
		 XMAC_CONFIG_RX_CRC_CHK_DIS |
		 XMAC_CONFIG_RESERVED_MULTICAST |
		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
		 XMAC_CONFIG_ADDR_FILTER_EN |
		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
		 XMAC_CONFIG_STRIP_CRC |
		 XMAC_CONFIG_PASS_FLOW_CTRL |
		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
	val |= (XMAC_CONFIG_HASH_FILTER_EN);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(RXMAC_BT_CNT, 0);
	nw64_mac(RXMAC_BC_FRM_CNT, 0);
	nw64_mac(RXMAC_MC_FRM_CNT, 0);
	nw64_mac(RXMAC_FRAG_CNT, 0);
	nw64_mac(RXMAC_HIST_CNT1, 0);
	nw64_mac(RXMAC_HIST_CNT2, 0);
	nw64_mac(RXMAC_HIST_CNT3, 0);
	nw64_mac(RXMAC_HIST_CNT4, 0);
	nw64_mac(RXMAC_HIST_CNT5, 0);
	nw64_mac(RXMAC_HIST_CNT6, 0);
	nw64_mac(RXMAC_HIST_CNT7, 0);
	nw64_mac(RXMAC_MPSZER_CNT, 0);
	nw64_mac(RXMAC_CRC_ER_CNT, 0);
	nw64_mac(RXMAC_CD_VIO_CNT, 0);
	nw64_mac(LINK_FAULT_CNT, 0);
}

static void niu_init_rx_bmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(BMAC_ADD_FILT0, 0);
	nw64_mac(BMAC_ADD_FILT1, 0);
	nw64_mac(BMAC_ADD_FILT2, 0);
	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(BMAC_HASH_TBL(i), 0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);

	val = nr64_mac(BRXMAC_CONFIG);
	val &= ~(BRXMAC_CONFIG_ENABLE |
		 BRXMAC_CONFIG_STRIP_PAD |
		 BRXMAC_CONFIG_STRIP_FCS |
		 BRXMAC_CONFIG_PROMISC |
		 BRXMAC_CONFIG_PROMISC_GRP |
		 BRXMAC_CONFIG_ADDR_FILT_EN |
		 BRXMAC_CONFIG_DISCARD_DIS);
	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
	nw64_mac(BRXMAC_CONFIG, val);

	val = nr64_mac(BMAC_ADDR_CMPEN);
	val |= BMAC_ADDR_CMPEN_EN0;
	nw64_mac(BMAC_ADDR_CMPEN, val);
}

static void niu_init_rx_mac(struct niu *np)
{
	niu_set_primary_mac(np, np->dev->dev_addr);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_rx_xmac(np);
	else
		niu_init_rx_bmac(np);
}
static void niu_enable_tx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	if (on)
		val |= XMAC_CONFIG_TX_ENABLE;
	else
		val &= ~XMAC_CONFIG_TX_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_tx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BTXMAC_CONFIG);

	if (on)
		val |= BTXMAC_CONFIG_ENABLE;
	else
		val &= ~BTXMAC_CONFIG_ENABLE;
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_enable_tx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_tx_xmac(np, on);
	else
		niu_enable_tx_bmac(np, on);
}

static void niu_enable_rx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
		 XMAC_CONFIG_PROMISCUOUS);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= XMAC_CONFIG_HASH_FILTER_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= XMAC_CONFIG_PROMISCUOUS;

	if (on)
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
	else
		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_rx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BRXMAC_CONFIG);

	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
		 BRXMAC_CONFIG_PROMISC);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= BRXMAC_CONFIG_HASH_FILT_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= BRXMAC_CONFIG_PROMISC;

	if (on)
		val |= BRXMAC_CONFIG_ENABLE;
	else
		val &= ~BRXMAC_CONFIG_ENABLE;
	nw64_mac(BRXMAC_CONFIG, val);
}

static void niu_enable_rx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_rx_xmac(np, on);
	else
		niu_enable_rx_bmac(np, on);
}
static int niu_init_mac(struct niu *np)
{
	int err;

	niu_init_xif(np);
	err = niu_init_pcs(np);
	if (err)
		return err;

	err = niu_reset_tx_mac(np);
	if (err)
		return err;
	niu_init_tx_mac(np);
	err = niu_reset_rx_mac(np);
	if (err)
		return err;
	niu_init_rx_mac(np);

	/* This looks hokey but the RX MAC reset we just did will
	 * undo some of the state we set up in niu_init_tx_mac() so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to its default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}
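/* Per-channel stop/reset helpers used by the error-recovery and shutdown
 * paths below.
 */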
static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}

static void niu_stop_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_stop_one_tx_channel(np, rp);
	}
}

static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}

static void niu_reset_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_reset_one_tx_channel(np, rp);
	}
}

static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
}

static void niu_stop_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_stop_one_rx_channel(np, rp);
	}
}

static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}

static void niu_reset_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_reset_one_rx_channel(np, rp);
	}
}
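/* Wait for the IPP DFIFO read and write pointers to meet, then disable the
 * IPP block and reset it.
 */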
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}
5955 static int niu_init_hw(struct niu
*np
)
5959 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Initialize TXC\n");
5960 niu_txc_enable_port(np
, 1);
5961 niu_txc_port_dma_enable(np
, 1);
5962 niu_txc_set_imask(np
, 0);
5964 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Initialize TX channels\n");
5965 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
5966 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
5968 err
= niu_init_one_tx_channel(np
, rp
);
5973 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Initialize RX channels\n");
5974 err
= niu_init_rx_channels(np
);
5976 goto out_uninit_tx_channels
;
5978 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Initialize classifier\n");
5979 err
= niu_init_classifier_hw(np
);
5981 goto out_uninit_rx_channels
;
5983 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Initialize ZCP\n");
5984 err
= niu_init_zcp(np
);
5986 goto out_uninit_rx_channels
;
5988 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Initialize IPP\n");
5989 err
= niu_init_ipp(np
);
5991 goto out_uninit_rx_channels
;
5993 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Initialize MAC\n");
5994 err
= niu_init_mac(np
);
5996 goto out_uninit_ipp
;
6001 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Uninit IPP\n");
6002 niu_disable_ipp(np
);
6004 out_uninit_rx_channels
:
6005 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Uninit RX channels\n");
6006 niu_stop_rx_channels(np
);
6007 niu_reset_rx_channels(np
);
6009 out_uninit_tx_channels
:
6010 netif_printk(np
, ifup
, KERN_DEBUG
, np
->dev
, "Uninit TX channels\n");
6011 niu_stop_tx_channels(np
);
6012 niu_reset_tx_channels(np
);
static void niu_stop_hw(struct niu *np)
{
	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
	niu_enable_interrupts(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
	niu_enable_rx_mac(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
	niu_disable_ipp(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
	niu_stop_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
	niu_stop_rx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
	niu_reset_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
	niu_reset_rx_channels(np);
}
6041 static void niu_set_irq_name(struct niu
*np
)
6043 int port
= np
->port
;
6046 sprintf(np
->irq_name
[0], "%s:MAC", np
->dev
->name
);
6049 sprintf(np
->irq_name
[1], "%s:MIF", np
->dev
->name
);
6050 sprintf(np
->irq_name
[2], "%s:SYSERR", np
->dev
->name
);
6054 for (i
= 0; i
< np
->num_ldg
- j
; i
++) {
6055 if (i
< np
->num_rx_rings
)
6056 sprintf(np
->irq_name
[i
+j
], "%s-rx-%d",
6058 else if (i
< np
->num_tx_rings
+ np
->num_rx_rings
)
6059 sprintf(np
->irq_name
[i
+j
], "%s-tx-%d", np
->dev
->name
,
6060 i
- np
->num_rx_rings
);
6064 static int niu_request_irq(struct niu
*np
)
6068 niu_set_irq_name(np
);
6071 for (i
= 0; i
< np
->num_ldg
; i
++) {
6072 struct niu_ldg
*lp
= &np
->ldg
[i
];
6074 err
= request_irq(lp
->irq
, niu_interrupt
,
6075 IRQF_SHARED
| IRQF_SAMPLE_RANDOM
,
6076 np
->irq_name
[i
], lp
);
6085 for (j
= 0; j
< i
; j
++) {
6086 struct niu_ldg
*lp
= &np
->ldg
[j
];
6088 free_irq(lp
->irq
, lp
);
6093 static void niu_free_irq(struct niu
*np
)
6097 for (i
= 0; i
< np
->num_ldg
; i
++) {
6098 struct niu_ldg
*lp
= &np
->ldg
[i
];
6100 free_irq(lp
->irq
, lp
);
6104 static void niu_enable_napi(struct niu
*np
)
6108 for (i
= 0; i
< np
->num_ldg
; i
++)
6109 napi_enable(&np
->ldg
[i
].napi
);
6112 static void niu_disable_napi(struct niu
*np
)
6116 for (i
= 0; i
< np
->num_ldg
; i
++)
6117 napi_disable(&np
->ldg
[i
].napi
);
6120 static int niu_open(struct net_device
*dev
)
6122 struct niu
*np
= netdev_priv(dev
);
6125 netif_carrier_off(dev
);
6127 err
= niu_alloc_channels(np
);
6131 err
= niu_enable_interrupts(np
, 0);
6133 goto out_free_channels
;
6135 err
= niu_request_irq(np
);
6137 goto out_free_channels
;
6139 niu_enable_napi(np
);
6141 spin_lock_irq(&np
->lock
);
6143 err
= niu_init_hw(np
);
6145 init_timer(&np
->timer
);
6146 np
->timer
.expires
= jiffies
+ HZ
;
6147 np
->timer
.data
= (unsigned long) np
;
6148 np
->timer
.function
= niu_timer
;
6150 err
= niu_enable_interrupts(np
, 1);
6155 spin_unlock_irq(&np
->lock
);
6158 niu_disable_napi(np
);
6162 netif_tx_start_all_queues(dev
);
6164 if (np
->link_config
.loopback_mode
!= LOOPBACK_DISABLED
)
6165 netif_carrier_on(dev
);
6167 add_timer(&np
->timer
);
6175 niu_free_channels(np
);
6181 static void niu_full_shutdown(struct niu
*np
, struct net_device
*dev
)
6183 cancel_work_sync(&np
->reset_task
);
6185 niu_disable_napi(np
);
6186 netif_tx_stop_all_queues(dev
);
6188 del_timer_sync(&np
->timer
);
6190 spin_lock_irq(&np
->lock
);
6194 spin_unlock_irq(&np
->lock
);
6197 static int niu_close(struct net_device
*dev
)
6199 struct niu
*np
= netdev_priv(dev
);
6201 niu_full_shutdown(np
, dev
);
6205 niu_free_channels(np
);
6207 niu_handle_led(np
, 0);
6212 static void niu_sync_xmac_stats(struct niu
*np
)
6214 struct niu_xmac_stats
*mp
= &np
->mac_stats
.xmac
;
6216 mp
->tx_frames
+= nr64_mac(TXMAC_FRM_CNT
);
6217 mp
->tx_bytes
+= nr64_mac(TXMAC_BYTE_CNT
);
6219 mp
->rx_link_faults
+= nr64_mac(LINK_FAULT_CNT
);
6220 mp
->rx_align_errors
+= nr64_mac(RXMAC_ALIGN_ERR_CNT
);
6221 mp
->rx_frags
+= nr64_mac(RXMAC_FRAG_CNT
);
6222 mp
->rx_mcasts
+= nr64_mac(RXMAC_MC_FRM_CNT
);
6223 mp
->rx_bcasts
+= nr64_mac(RXMAC_BC_FRM_CNT
);
6224 mp
->rx_hist_cnt1
+= nr64_mac(RXMAC_HIST_CNT1
);
6225 mp
->rx_hist_cnt2
+= nr64_mac(RXMAC_HIST_CNT2
);
6226 mp
->rx_hist_cnt3
+= nr64_mac(RXMAC_HIST_CNT3
);
6227 mp
->rx_hist_cnt4
+= nr64_mac(RXMAC_HIST_CNT4
);
6228 mp
->rx_hist_cnt5
+= nr64_mac(RXMAC_HIST_CNT5
);
6229 mp
->rx_hist_cnt6
+= nr64_mac(RXMAC_HIST_CNT6
);
6230 mp
->rx_hist_cnt7
+= nr64_mac(RXMAC_HIST_CNT7
);
6231 mp
->rx_octets
+= nr64_mac(RXMAC_BT_CNT
);
6232 mp
->rx_code_violations
+= nr64_mac(RXMAC_CD_VIO_CNT
);
6233 mp
->rx_len_errors
+= nr64_mac(RXMAC_MPSZER_CNT
);
6234 mp
->rx_crc_errors
+= nr64_mac(RXMAC_CRC_ER_CNT
);
6237 static void niu_sync_bmac_stats(struct niu
*np
)
6239 struct niu_bmac_stats
*mp
= &np
->mac_stats
.bmac
;
6241 mp
->tx_bytes
+= nr64_mac(BTXMAC_BYTE_CNT
);
6242 mp
->tx_frames
+= nr64_mac(BTXMAC_FRM_CNT
);
6244 mp
->rx_frames
+= nr64_mac(BRXMAC_FRAME_CNT
);
6245 mp
->rx_align_errors
+= nr64_mac(BRXMAC_ALIGN_ERR_CNT
);
6246 mp
->rx_crc_errors
+= nr64_mac(BRXMAC_ALIGN_ERR_CNT
);
6247 mp
->rx_len_errors
+= nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT
);
6250 static void niu_sync_mac_stats(struct niu
*np
)
6252 if (np
->flags
& NIU_FLAGS_XMAC
)
6253 niu_sync_xmac_stats(np
);
6255 niu_sync_bmac_stats(np
);
6258 static void niu_get_rx_stats(struct niu
*np
)
6260 unsigned long pkts
, dropped
, errors
, bytes
;
6261 struct rx_ring_info
*rx_rings
;
6264 pkts
= dropped
= errors
= bytes
= 0;
6266 rx_rings
= ACCESS_ONCE(np
->rx_rings
);
6270 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
6271 struct rx_ring_info
*rp
= &rx_rings
[i
];
6273 niu_sync_rx_discard_stats(np
, rp
, 0);
6275 pkts
+= rp
->rx_packets
;
6276 bytes
+= rp
->rx_bytes
;
6277 dropped
+= rp
->rx_dropped
;
6278 errors
+= rp
->rx_errors
;
6282 np
->dev
->stats
.rx_packets
= pkts
;
6283 np
->dev
->stats
.rx_bytes
= bytes
;
6284 np
->dev
->stats
.rx_dropped
= dropped
;
6285 np
->dev
->stats
.rx_errors
= errors
;
6288 static void niu_get_tx_stats(struct niu
*np
)
6290 unsigned long pkts
, errors
, bytes
;
6291 struct tx_ring_info
*tx_rings
;
6294 pkts
= errors
= bytes
= 0;
6296 tx_rings
= ACCESS_ONCE(np
->tx_rings
);
6300 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
6301 struct tx_ring_info
*rp
= &tx_rings
[i
];
6303 pkts
+= rp
->tx_packets
;
6304 bytes
+= rp
->tx_bytes
;
6305 errors
+= rp
->tx_errors
;
6309 np
->dev
->stats
.tx_packets
= pkts
;
6310 np
->dev
->stats
.tx_bytes
= bytes
;
6311 np
->dev
->stats
.tx_errors
= errors
;
6314 static struct net_device_stats
*niu_get_stats(struct net_device
*dev
)
6316 struct niu
*np
= netdev_priv(dev
);
6318 if (netif_running(dev
)) {
6319 niu_get_rx_stats(np
);
6320 niu_get_tx_stats(np
);
6325 static void niu_load_hash_xmac(struct niu
*np
, u16
*hash
)
6329 for (i
= 0; i
< 16; i
++)
6330 nw64_mac(XMAC_HASH_TBL(i
), hash
[i
]);
6333 static void niu_load_hash_bmac(struct niu
*np
, u16
*hash
)
6337 for (i
= 0; i
< 16; i
++)
6338 nw64_mac(BMAC_HASH_TBL(i
), hash
[i
]);
6341 static void niu_load_hash(struct niu
*np
, u16
*hash
)
6343 if (np
->flags
& NIU_FLAGS_XMAC
)
6344 niu_load_hash_xmac(np
, hash
);
6346 niu_load_hash_bmac(np
, hash
);
6349 static void niu_set_rx_mode(struct net_device
*dev
)
6351 struct niu
*np
= netdev_priv(dev
);
6352 int i
, alt_cnt
, err
;
6353 struct netdev_hw_addr
*ha
;
6354 unsigned long flags
;
6355 u16 hash
[16] = { 0, };
6357 spin_lock_irqsave(&np
->lock
, flags
);
6358 niu_enable_rx_mac(np
, 0);
6360 np
->flags
&= ~(NIU_FLAGS_MCAST
| NIU_FLAGS_PROMISC
);
6361 if (dev
->flags
& IFF_PROMISC
)
6362 np
->flags
|= NIU_FLAGS_PROMISC
;
6363 if ((dev
->flags
& IFF_ALLMULTI
) || (!netdev_mc_empty(dev
)))
6364 np
->flags
|= NIU_FLAGS_MCAST
;
6366 alt_cnt
= netdev_uc_count(dev
);
6367 if (alt_cnt
> niu_num_alt_addr(np
)) {
6369 np
->flags
|= NIU_FLAGS_PROMISC
;
6375 netdev_for_each_uc_addr(ha
, dev
) {
6376 err
= niu_set_alt_mac(np
, index
, ha
->addr
);
6378 netdev_warn(dev
, "Error %d adding alt mac %d\n",
6380 err
= niu_enable_alt_mac(np
, index
, 1);
6382 netdev_warn(dev
, "Error %d enabling alt mac %d\n",
6389 if (np
->flags
& NIU_FLAGS_XMAC
)
6393 for (i
= alt_start
; i
< niu_num_alt_addr(np
); i
++) {
6394 err
= niu_enable_alt_mac(np
, i
, 0);
6396 netdev_warn(dev
, "Error %d disabling alt mac %d\n",
6400 if (dev
->flags
& IFF_ALLMULTI
) {
6401 for (i
= 0; i
< 16; i
++)
6403 } else if (!netdev_mc_empty(dev
)) {
6404 netdev_for_each_mc_addr(ha
, dev
) {
6405 u32 crc
= ether_crc_le(ETH_ALEN
, ha
->addr
);
6408 hash
[crc
>> 4] |= (1 << (15 - (crc
& 0xf)));
6412 if (np
->flags
& NIU_FLAGS_MCAST
)
6413 niu_load_hash(np
, hash
);
6415 niu_enable_rx_mac(np
, 1);
6416 spin_unlock_irqrestore(&np
->lock
, flags
);
6419 static int niu_set_mac_addr(struct net_device
*dev
, void *p
)
6421 struct niu
*np
= netdev_priv(dev
);
6422 struct sockaddr
*addr
= p
;
6423 unsigned long flags
;
6425 if (!is_valid_ether_addr(addr
->sa_data
))
6428 memcpy(dev
->dev_addr
, addr
->sa_data
, ETH_ALEN
);
6430 if (!netif_running(dev
))
6433 spin_lock_irqsave(&np
->lock
, flags
);
6434 niu_enable_rx_mac(np
, 0);
6435 niu_set_primary_mac(np
, dev
->dev_addr
);
6436 niu_enable_rx_mac(np
, 1);
6437 spin_unlock_irqrestore(&np
->lock
, flags
);
6442 static int niu_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
6447 static void niu_netif_stop(struct niu
*np
)
6449 np
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
6451 niu_disable_napi(np
);
6453 netif_tx_disable(np
->dev
);
6456 static void niu_netif_start(struct niu
*np
)
6458 /* NOTE: unconditional netif_wake_queue is only appropriate
6459 * so long as all callers are assured to have free tx slots
6460 * (such as after niu_init_hw).
6462 netif_tx_wake_all_queues(np
->dev
);
6464 niu_enable_napi(np
);
6466 niu_enable_interrupts(np
, 1);
6469 static void niu_reset_buffers(struct niu
*np
)
6474 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
6475 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
6477 for (j
= 0, k
= 0; j
< MAX_RBR_RING_SIZE
; j
++) {
6480 page
= rp
->rxhash
[j
];
6483 (struct page
*) page
->mapping
;
6484 u64 base
= page
->index
;
6485 base
= base
>> RBR_DESCR_ADDR_SHIFT
;
6486 rp
->rbr
[k
++] = cpu_to_le32(base
);
6490 for (; k
< MAX_RBR_RING_SIZE
; k
++) {
6491 err
= niu_rbr_add_page(np
, rp
, GFP_ATOMIC
, k
);
6496 rp
->rbr_index
= rp
->rbr_table_size
- 1;
6498 rp
->rbr_pending
= 0;
6499 rp
->rbr_refill_pending
= 0;
6503 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
6504 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
6506 for (j
= 0; j
< MAX_TX_RING_SIZE
; j
++) {
6507 if (rp
->tx_buffs
[j
].skb
)
6508 (void) release_tx_packet(np
, rp
, j
);
6511 rp
->pending
= MAX_TX_RING_SIZE
;
6519 static void niu_reset_task(struct work_struct
*work
)
6521 struct niu
*np
= container_of(work
, struct niu
, reset_task
);
6522 unsigned long flags
;
6525 spin_lock_irqsave(&np
->lock
, flags
);
6526 if (!netif_running(np
->dev
)) {
6527 spin_unlock_irqrestore(&np
->lock
, flags
);
6531 spin_unlock_irqrestore(&np
->lock
, flags
);
6533 del_timer_sync(&np
->timer
);
6537 spin_lock_irqsave(&np
->lock
, flags
);
6541 spin_unlock_irqrestore(&np
->lock
, flags
);
6543 niu_reset_buffers(np
);
6545 spin_lock_irqsave(&np
->lock
, flags
);
6547 err
= niu_init_hw(np
);
6549 np
->timer
.expires
= jiffies
+ HZ
;
6550 add_timer(&np
->timer
);
6551 niu_netif_start(np
);
6554 spin_unlock_irqrestore(&np
->lock
, flags
);
static void niu_tx_timeout(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	dev_err(np->device, "%s: Transmit timed out, resetting\n",
		dev->name);

	schedule_work(&np->reset_task);
}
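/* Pack one TX descriptor: the mark bit, fragment count, transfer length and
 * DMA address all live in a single little-endian 64-bit word.
 */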
static void niu_set_txd(struct tx_ring_info *rp, int index,
			u64 mapping, u64 len, u64 mark,
			u64 n_frags)
{
	__le64 *desc = &rp->descr[index];

	*desc = cpu_to_le64(mark |
			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
			    (len << TX_DESC_TR_LEN_SHIFT) |
			    (mapping & TX_DESC_SAD));
}
6579 static u64
niu_compute_tx_flags(struct sk_buff
*skb
, struct ethhdr
*ehdr
,
6580 u64 pad_bytes
, u64 len
)
6582 u16 eth_proto
, eth_proto_inner
;
6583 u64 csum_bits
, l3off
, ihl
, ret
;
6587 eth_proto
= be16_to_cpu(ehdr
->h_proto
);
6588 eth_proto_inner
= eth_proto
;
6589 if (eth_proto
== ETH_P_8021Q
) {
6590 struct vlan_ethhdr
*vp
= (struct vlan_ethhdr
*) ehdr
;
6591 __be16 val
= vp
->h_vlan_encapsulated_proto
;
6593 eth_proto_inner
= be16_to_cpu(val
);
6597 switch (skb
->protocol
) {
6598 case cpu_to_be16(ETH_P_IP
):
6599 ip_proto
= ip_hdr(skb
)->protocol
;
6600 ihl
= ip_hdr(skb
)->ihl
;
6602 case cpu_to_be16(ETH_P_IPV6
):
6603 ip_proto
= ipv6_hdr(skb
)->nexthdr
;
6612 csum_bits
= TXHDR_CSUM_NONE
;
6613 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
6616 csum_bits
= (ip_proto
== IPPROTO_TCP
?
6618 (ip_proto
== IPPROTO_UDP
?
6619 TXHDR_CSUM_UDP
: TXHDR_CSUM_SCTP
));
6621 start
= skb_checksum_start_offset(skb
) -
6622 (pad_bytes
+ sizeof(struct tx_pkt_hdr
));
6623 stuff
= start
+ skb
->csum_offset
;
6625 csum_bits
|= (start
/ 2) << TXHDR_L4START_SHIFT
;
6626 csum_bits
|= (stuff
/ 2) << TXHDR_L4STUFF_SHIFT
;
6629 l3off
= skb_network_offset(skb
) -
6630 (pad_bytes
+ sizeof(struct tx_pkt_hdr
));
6632 ret
= (((pad_bytes
/ 2) << TXHDR_PAD_SHIFT
) |
6633 (len
<< TXHDR_LEN_SHIFT
) |
6634 ((l3off
/ 2) << TXHDR_L3START_SHIFT
) |
6635 (ihl
<< TXHDR_IHL_SHIFT
) |
6636 ((eth_proto_inner
< 1536) ? TXHDR_LLC
: 0) |
6637 ((eth_proto
== ETH_P_8021Q
) ? TXHDR_VLAN
: 0) |
6638 (ipv6
? TXHDR_IP_VER
: 0) |
6644 static netdev_tx_t
niu_start_xmit(struct sk_buff
*skb
,
6645 struct net_device
*dev
)
6647 struct niu
*np
= netdev_priv(dev
);
6648 unsigned long align
, headroom
;
6649 struct netdev_queue
*txq
;
6650 struct tx_ring_info
*rp
;
6651 struct tx_pkt_hdr
*tp
;
6652 unsigned int len
, nfg
;
6653 struct ethhdr
*ehdr
;
6657 i
= skb_get_queue_mapping(skb
);
6658 rp
= &np
->tx_rings
[i
];
6659 txq
= netdev_get_tx_queue(dev
, i
);
6661 if (niu_tx_avail(rp
) <= (skb_shinfo(skb
)->nr_frags
+ 1)) {
6662 netif_tx_stop_queue(txq
);
6663 dev_err(np
->device
, "%s: BUG! Tx ring full when queue awake!\n", dev
->name
);
6665 return NETDEV_TX_BUSY
;
6668 if (skb
->len
< ETH_ZLEN
) {
6669 unsigned int pad_bytes
= ETH_ZLEN
- skb
->len
;
6671 if (skb_pad(skb
, pad_bytes
))
6673 skb_put(skb
, pad_bytes
);
6676 len
= sizeof(struct tx_pkt_hdr
) + 15;
6677 if (skb_headroom(skb
) < len
) {
6678 struct sk_buff
*skb_new
;
6680 skb_new
= skb_realloc_headroom(skb
, len
);
6690 align
= ((unsigned long) skb
->data
& (16 - 1));
6691 headroom
= align
+ sizeof(struct tx_pkt_hdr
);
6693 ehdr
= (struct ethhdr
*) skb
->data
;
6694 tp
= (struct tx_pkt_hdr
*) skb_push(skb
, headroom
);
6696 len
= skb
->len
- sizeof(struct tx_pkt_hdr
);
6697 tp
->flags
= cpu_to_le64(niu_compute_tx_flags(skb
, ehdr
, align
, len
));
6700 len
= skb_headlen(skb
);
6701 mapping
= np
->ops
->map_single(np
->device
, skb
->data
,
6702 len
, DMA_TO_DEVICE
);
6706 rp
->tx_buffs
[prod
].skb
= skb
;
6707 rp
->tx_buffs
[prod
].mapping
= mapping
;
6710 if (++rp
->mark_counter
== rp
->mark_freq
) {
6711 rp
->mark_counter
= 0;
6712 mrk
|= TX_DESC_MARK
;
6717 nfg
= skb_shinfo(skb
)->nr_frags
;
6719 tlen
-= MAX_TX_DESC_LEN
;
6724 unsigned int this_len
= len
;
6726 if (this_len
> MAX_TX_DESC_LEN
)
6727 this_len
= MAX_TX_DESC_LEN
;
6729 niu_set_txd(rp
, prod
, mapping
, this_len
, mrk
, nfg
);
6732 prod
= NEXT_TX(rp
, prod
);
6733 mapping
+= this_len
;
6737 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
6738 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6741 mapping
= np
->ops
->map_page(np
->device
, frag
->page
,
6742 frag
->page_offset
, len
,
6745 rp
->tx_buffs
[prod
].skb
= NULL
;
6746 rp
->tx_buffs
[prod
].mapping
= mapping
;
6748 niu_set_txd(rp
, prod
, mapping
, len
, 0, 0);
6750 prod
= NEXT_TX(rp
, prod
);
6753 if (prod
< rp
->prod
)
6754 rp
->wrap_bit
^= TX_RING_KICK_WRAP
;
6757 nw64(TX_RING_KICK(rp
->tx_channel
), rp
->wrap_bit
| (prod
<< 3));
6759 if (unlikely(niu_tx_avail(rp
) <= (MAX_SKB_FRAGS
+ 1))) {
6760 netif_tx_stop_queue(txq
);
6761 if (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
))
6762 netif_tx_wake_queue(txq
);
6766 return NETDEV_TX_OK
;
6774 static int niu_change_mtu(struct net_device
*dev
, int new_mtu
)
6776 struct niu
*np
= netdev_priv(dev
);
6777 int err
, orig_jumbo
, new_jumbo
;
6779 if (new_mtu
< 68 || new_mtu
> NIU_MAX_MTU
)
6782 orig_jumbo
= (dev
->mtu
> ETH_DATA_LEN
);
6783 new_jumbo
= (new_mtu
> ETH_DATA_LEN
);
6787 if (!netif_running(dev
) ||
6788 (orig_jumbo
== new_jumbo
))
6791 niu_full_shutdown(np
, dev
);
6793 niu_free_channels(np
);
6795 niu_enable_napi(np
);
6797 err
= niu_alloc_channels(np
);
6801 spin_lock_irq(&np
->lock
);
6803 err
= niu_init_hw(np
);
6805 init_timer(&np
->timer
);
6806 np
->timer
.expires
= jiffies
+ HZ
;
6807 np
->timer
.data
= (unsigned long) np
;
6808 np
->timer
.function
= niu_timer
;
6810 err
= niu_enable_interrupts(np
, 1);
6815 spin_unlock_irq(&np
->lock
);
6818 netif_tx_start_all_queues(dev
);
6819 if (np
->link_config
.loopback_mode
!= LOOPBACK_DISABLED
)
6820 netif_carrier_on(dev
);
6822 add_timer(&np
->timer
);
6828 static void niu_get_drvinfo(struct net_device
*dev
,
6829 struct ethtool_drvinfo
*info
)
6831 struct niu
*np
= netdev_priv(dev
);
6832 struct niu_vpd
*vpd
= &np
->vpd
;
6834 strcpy(info
->driver
, DRV_MODULE_NAME
);
6835 strcpy(info
->version
, DRV_MODULE_VERSION
);
6836 sprintf(info
->fw_version
, "%d.%d",
6837 vpd
->fcode_major
, vpd
->fcode_minor
);
6838 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
)
6839 strcpy(info
->bus_info
, pci_name(np
->pdev
));
6842 static int niu_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
6844 struct niu
*np
= netdev_priv(dev
);
6845 struct niu_link_config
*lp
;
6847 lp
= &np
->link_config
;
6849 memset(cmd
, 0, sizeof(*cmd
));
6850 cmd
->phy_address
= np
->phy_addr
;
6851 cmd
->supported
= lp
->supported
;
6852 cmd
->advertising
= lp
->active_advertising
;
6853 cmd
->autoneg
= lp
->active_autoneg
;
6854 cmd
->speed
= lp
->active_speed
;
6855 cmd
->duplex
= lp
->active_duplex
;
6856 cmd
->port
= (np
->flags
& NIU_FLAGS_FIBER
) ? PORT_FIBRE
: PORT_TP
;
6857 cmd
->transceiver
= (np
->flags
& NIU_FLAGS_XCVR_SERDES
) ?
6858 XCVR_EXTERNAL
: XCVR_INTERNAL
;
6863 static int niu_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
6865 struct niu
*np
= netdev_priv(dev
);
6866 struct niu_link_config
*lp
= &np
->link_config
;
6868 lp
->advertising
= cmd
->advertising
;
6869 lp
->speed
= cmd
->speed
;
6870 lp
->duplex
= cmd
->duplex
;
6871 lp
->autoneg
= cmd
->autoneg
;
6872 return niu_init_link(np
);
6875 static u32
niu_get_msglevel(struct net_device
*dev
)
6877 struct niu
*np
= netdev_priv(dev
);
6878 return np
->msg_enable
;
6881 static void niu_set_msglevel(struct net_device
*dev
, u32 value
)
6883 struct niu
*np
= netdev_priv(dev
);
6884 np
->msg_enable
= value
;
6887 static int niu_nway_reset(struct net_device
*dev
)
6889 struct niu
*np
= netdev_priv(dev
);
6891 if (np
->link_config
.autoneg
)
6892 return niu_init_link(np
);
6897 static int niu_get_eeprom_len(struct net_device
*dev
)
6899 struct niu
*np
= netdev_priv(dev
);
6901 return np
->eeprom_len
;
6904 static int niu_get_eeprom(struct net_device
*dev
,
6905 struct ethtool_eeprom
*eeprom
, u8
*data
)
6907 struct niu
*np
= netdev_priv(dev
);
6908 u32 offset
, len
, val
;
6910 offset
= eeprom
->offset
;
6913 if (offset
+ len
< offset
)
6915 if (offset
>= np
->eeprom_len
)
6917 if (offset
+ len
> np
->eeprom_len
)
6918 len
= eeprom
->len
= np
->eeprom_len
- offset
;
6921 u32 b_offset
, b_count
;
6923 b_offset
= offset
& 3;
6924 b_count
= 4 - b_offset
;
6928 val
= nr64(ESPC_NCR((offset
- b_offset
) / 4));
6929 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
6935 val
= nr64(ESPC_NCR(offset
/ 4));
6936 memcpy(data
, &val
, 4);
6942 val
= nr64(ESPC_NCR(offset
/ 4));
6943 memcpy(data
, &val
, len
);
6948 static void niu_ethflow_to_l3proto(int flow_type
, u8
*pid
)
6950 switch (flow_type
) {
6961 *pid
= IPPROTO_SCTP
;
6977 static int niu_class_to_ethflow(u64
class, int *flow_type
)
6980 case CLASS_CODE_TCP_IPV4
:
6981 *flow_type
= TCP_V4_FLOW
;
6983 case CLASS_CODE_UDP_IPV4
:
6984 *flow_type
= UDP_V4_FLOW
;
6986 case CLASS_CODE_AH_ESP_IPV4
:
6987 *flow_type
= AH_V4_FLOW
;
6989 case CLASS_CODE_SCTP_IPV4
:
6990 *flow_type
= SCTP_V4_FLOW
;
6992 case CLASS_CODE_TCP_IPV6
:
6993 *flow_type
= TCP_V6_FLOW
;
6995 case CLASS_CODE_UDP_IPV6
:
6996 *flow_type
= UDP_V6_FLOW
;
6998 case CLASS_CODE_AH_ESP_IPV6
:
6999 *flow_type
= AH_V6_FLOW
;
7001 case CLASS_CODE_SCTP_IPV6
:
7002 *flow_type
= SCTP_V6_FLOW
;
7004 case CLASS_CODE_USER_PROG1
:
7005 case CLASS_CODE_USER_PROG2
:
7006 case CLASS_CODE_USER_PROG3
:
7007 case CLASS_CODE_USER_PROG4
:
7008 *flow_type
= IP_USER_FLOW
;
7017 static int niu_ethflow_to_class(int flow_type
, u64
*class)
7019 switch (flow_type
) {
7021 *class = CLASS_CODE_TCP_IPV4
;
7024 *class = CLASS_CODE_UDP_IPV4
;
7028 *class = CLASS_CODE_AH_ESP_IPV4
;
7031 *class = CLASS_CODE_SCTP_IPV4
;
7034 *class = CLASS_CODE_TCP_IPV6
;
7037 *class = CLASS_CODE_UDP_IPV6
;
7041 *class = CLASS_CODE_AH_ESP_IPV6
;
7044 *class = CLASS_CODE_SCTP_IPV6
;
7053 static u64
niu_flowkey_to_ethflow(u64 flow_key
)
7057 if (flow_key
& FLOW_KEY_L2DA
)
7058 ethflow
|= RXH_L2DA
;
7059 if (flow_key
& FLOW_KEY_VLAN
)
7060 ethflow
|= RXH_VLAN
;
7061 if (flow_key
& FLOW_KEY_IPSA
)
7062 ethflow
|= RXH_IP_SRC
;
7063 if (flow_key
& FLOW_KEY_IPDA
)
7064 ethflow
|= RXH_IP_DST
;
7065 if (flow_key
& FLOW_KEY_PROTO
)
7066 ethflow
|= RXH_L3_PROTO
;
7067 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
))
7068 ethflow
|= RXH_L4_B_0_1
;
7069 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
))
7070 ethflow
|= RXH_L4_B_2_3
;
7076 static int niu_ethflow_to_flowkey(u64 ethflow
, u64
*flow_key
)
7080 if (ethflow
& RXH_L2DA
)
7081 key
|= FLOW_KEY_L2DA
;
7082 if (ethflow
& RXH_VLAN
)
7083 key
|= FLOW_KEY_VLAN
;
7084 if (ethflow
& RXH_IP_SRC
)
7085 key
|= FLOW_KEY_IPSA
;
7086 if (ethflow
& RXH_IP_DST
)
7087 key
|= FLOW_KEY_IPDA
;
7088 if (ethflow
& RXH_L3_PROTO
)
7089 key
|= FLOW_KEY_PROTO
;
7090 if (ethflow
& RXH_L4_B_0_1
)
7091 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
);
7092 if (ethflow
& RXH_L4_B_2_3
)
7093 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
);
7101 static int niu_get_hash_opts(struct niu
*np
, struct ethtool_rxnfc
*nfc
)
7107 if (!niu_ethflow_to_class(nfc
->flow_type
, &class))
7110 if (np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] &
7112 nfc
->data
= RXH_DISCARD
;
7114 nfc
->data
= niu_flowkey_to_ethflow(np
->parent
->flow_key
[class -
7115 CLASS_CODE_USER_PROG1
]);
7119 static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry
*tp
,
7120 struct ethtool_rx_flow_spec
*fsp
)
7125 tmp
= (tp
->key
[3] & TCAM_V4KEY3_SADDR
) >> TCAM_V4KEY3_SADDR_SHIFT
;
7126 fsp
->h_u
.tcp_ip4_spec
.ip4src
= cpu_to_be32(tmp
);
7128 tmp
= (tp
->key
[3] & TCAM_V4KEY3_DADDR
) >> TCAM_V4KEY3_DADDR_SHIFT
;
7129 fsp
->h_u
.tcp_ip4_spec
.ip4dst
= cpu_to_be32(tmp
);
7131 tmp
= (tp
->key_mask
[3] & TCAM_V4KEY3_SADDR
) >> TCAM_V4KEY3_SADDR_SHIFT
;
7132 fsp
->m_u
.tcp_ip4_spec
.ip4src
= cpu_to_be32(tmp
);
7134 tmp
= (tp
->key_mask
[3] & TCAM_V4KEY3_DADDR
) >> TCAM_V4KEY3_DADDR_SHIFT
;
7135 fsp
->m_u
.tcp_ip4_spec
.ip4dst
= cpu_to_be32(tmp
);
7137 fsp
->h_u
.tcp_ip4_spec
.tos
= (tp
->key
[2] & TCAM_V4KEY2_TOS
) >>
7138 TCAM_V4KEY2_TOS_SHIFT
;
7139 fsp
->m_u
.tcp_ip4_spec
.tos
= (tp
->key_mask
[2] & TCAM_V4KEY2_TOS
) >>
7140 TCAM_V4KEY2_TOS_SHIFT
;
7142 switch (fsp
->flow_type
) {
7146 prt
= ((tp
->key
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7147 TCAM_V4KEY2_PORT_SPI_SHIFT
) >> 16;
7148 fsp
->h_u
.tcp_ip4_spec
.psrc
= cpu_to_be16(prt
);
7150 prt
= ((tp
->key
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7151 TCAM_V4KEY2_PORT_SPI_SHIFT
) & 0xffff;
7152 fsp
->h_u
.tcp_ip4_spec
.pdst
= cpu_to_be16(prt
);
7154 prt
= ((tp
->key_mask
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7155 TCAM_V4KEY2_PORT_SPI_SHIFT
) >> 16;
7156 fsp
->m_u
.tcp_ip4_spec
.psrc
= cpu_to_be16(prt
);
7158 prt
= ((tp
->key_mask
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7159 TCAM_V4KEY2_PORT_SPI_SHIFT
) & 0xffff;
7160 fsp
->m_u
.tcp_ip4_spec
.pdst
= cpu_to_be16(prt
);
7164 tmp
= (tp
->key
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7165 TCAM_V4KEY2_PORT_SPI_SHIFT
;
7166 fsp
->h_u
.ah_ip4_spec
.spi
= cpu_to_be32(tmp
);
7168 tmp
= (tp
->key_mask
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7169 TCAM_V4KEY2_PORT_SPI_SHIFT
;
7170 fsp
->m_u
.ah_ip4_spec
.spi
= cpu_to_be32(tmp
);
7173 tmp
= (tp
->key
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7174 TCAM_V4KEY2_PORT_SPI_SHIFT
;
7175 fsp
->h_u
.usr_ip4_spec
.l4_4_bytes
= cpu_to_be32(tmp
);
7177 tmp
= (tp
->key_mask
[2] & TCAM_V4KEY2_PORT_SPI
) >>
7178 TCAM_V4KEY2_PORT_SPI_SHIFT
;
7179 fsp
->m_u
.usr_ip4_spec
.l4_4_bytes
= cpu_to_be32(tmp
);
7181 fsp
->h_u
.usr_ip4_spec
.proto
=
7182 (tp
->key
[2] & TCAM_V4KEY2_PROTO
) >>
7183 TCAM_V4KEY2_PROTO_SHIFT
;
7184 fsp
->m_u
.usr_ip4_spec
.proto
=
7185 (tp
->key_mask
[2] & TCAM_V4KEY2_PROTO
) >>
7186 TCAM_V4KEY2_PROTO_SHIFT
;
7188 fsp
->h_u
.usr_ip4_spec
.ip_ver
= ETH_RX_NFC_IP4
;
7195 static int niu_get_ethtool_tcam_entry(struct niu
*np
,
7196 struct ethtool_rxnfc
*nfc
)
7198 struct niu_parent
*parent
= np
->parent
;
7199 struct niu_tcam_entry
*tp
;
7200 struct ethtool_rx_flow_spec
*fsp
= &nfc
->fs
;
7205 idx
= tcam_get_index(np
, (u16
)nfc
->fs
.location
);
7207 tp
= &parent
->tcam
[idx
];
7209 netdev_info(np
->dev
, "niu%d: entry [%d] invalid for idx[%d]\n",
7210 parent
->index
, (u16
)nfc
->fs
.location
, idx
);
7214 /* fill the flow spec entry */
7215 class = (tp
->key
[0] & TCAM_V4KEY0_CLASS_CODE
) >>
7216 TCAM_V4KEY0_CLASS_CODE_SHIFT
;
7217 ret
= niu_class_to_ethflow(class, &fsp
->flow_type
);
7220 netdev_info(np
->dev
, "niu%d: niu_class_to_ethflow failed\n",
7226 if (fsp
->flow_type
== AH_V4_FLOW
|| fsp
->flow_type
== AH_V6_FLOW
) {
7227 u32 proto
= (tp
->key
[2] & TCAM_V4KEY2_PROTO
) >>
7228 TCAM_V4KEY2_PROTO_SHIFT
;
7229 if (proto
== IPPROTO_ESP
) {
7230 if (fsp
->flow_type
== AH_V4_FLOW
)
7231 fsp
->flow_type
= ESP_V4_FLOW
;
7233 fsp
->flow_type
= ESP_V6_FLOW
;
7237 switch (fsp
->flow_type
) {
7243 niu_get_ip4fs_from_tcam_key(tp
, fsp
);
7250 /* Not yet implemented */
7254 niu_get_ip4fs_from_tcam_key(tp
, fsp
);
7264 if (tp
->assoc_data
& TCAM_ASSOCDATA_DISC
)
7265 fsp
->ring_cookie
= RX_CLS_FLOW_DISC
;
7267 fsp
->ring_cookie
= (tp
->assoc_data
& TCAM_ASSOCDATA_OFFSET
) >>
7268 TCAM_ASSOCDATA_OFFSET_SHIFT
;
7270 /* put the tcam size here */
7271 nfc
->data
= tcam_get_size(np
);
7276 static int niu_get_ethtool_tcam_all(struct niu
*np
,
7277 struct ethtool_rxnfc
*nfc
,
7280 struct niu_parent
*parent
= np
->parent
;
7281 struct niu_tcam_entry
*tp
;
7283 unsigned long flags
;
7286 /* put the tcam size here */
7287 nfc
->data
= tcam_get_size(np
);
7289 niu_lock_parent(np
, flags
);
7290 for (cnt
= 0, i
= 0; i
< nfc
->data
; i
++) {
7291 idx
= tcam_get_index(np
, i
);
7292 tp
= &parent
->tcam
[idx
];
7295 if (cnt
== nfc
->rule_cnt
) {
7302 niu_unlock_parent(np
, flags
);
7307 static int niu_get_nfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
,
7310 struct niu
*np
= netdev_priv(dev
);
7315 ret
= niu_get_hash_opts(np
, cmd
);
7317 case ETHTOOL_GRXRINGS
:
7318 cmd
->data
= np
->num_rx_rings
;
7320 case ETHTOOL_GRXCLSRLCNT
:
7321 cmd
->rule_cnt
= tcam_get_valid_entry_cnt(np
);
7323 case ETHTOOL_GRXCLSRULE
:
7324 ret
= niu_get_ethtool_tcam_entry(np
, cmd
);
7326 case ETHTOOL_GRXCLSRLALL
:
7327 ret
= niu_get_ethtool_tcam_all(np
, cmd
, (u32
*)rule_locs
);
7337 static int niu_set_hash_opts(struct niu
*np
, struct ethtool_rxnfc
*nfc
)
7341 unsigned long flags
;
7343 if (!niu_ethflow_to_class(nfc
->flow_type
, &class))
7346 if (class < CLASS_CODE_USER_PROG1
||
7347 class > CLASS_CODE_SCTP_IPV6
)
7350 if (nfc
->data
& RXH_DISCARD
) {
7351 niu_lock_parent(np
, flags
);
7352 flow_key
= np
->parent
->tcam_key
[class -
7353 CLASS_CODE_USER_PROG1
];
7354 flow_key
|= TCAM_KEY_DISC
;
7355 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
7356 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
7357 niu_unlock_parent(np
, flags
);
7360 /* Discard was set before, but is not set now */
7361 if (np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] &
7363 niu_lock_parent(np
, flags
);
7364 flow_key
= np
->parent
->tcam_key
[class -
7365 CLASS_CODE_USER_PROG1
];
7366 flow_key
&= ~TCAM_KEY_DISC
;
7367 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
),
7369 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] =
7371 niu_unlock_parent(np
, flags
);
7375 if (!niu_ethflow_to_flowkey(nfc
->data
, &flow_key
))
7378 niu_lock_parent(np
, flags
);
7379 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
7380 np
->parent
->flow_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
7381 niu_unlock_parent(np
, flags
);
7386 static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec
*fsp
,
7387 struct niu_tcam_entry
*tp
,
7388 int l2_rdc_tab
, u64
class)
7391 u32 sip
, dip
, sipm
, dipm
, spi
, spim
;
7392 u16 sport
, dport
, spm
, dpm
;
7394 sip
= be32_to_cpu(fsp
->h_u
.tcp_ip4_spec
.ip4src
);
7395 sipm
= be32_to_cpu(fsp
->m_u
.tcp_ip4_spec
.ip4src
);
7396 dip
= be32_to_cpu(fsp
->h_u
.tcp_ip4_spec
.ip4dst
);
7397 dipm
= be32_to_cpu(fsp
->m_u
.tcp_ip4_spec
.ip4dst
);
7399 tp
->key
[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT
;
7400 tp
->key_mask
[0] = TCAM_V4KEY0_CLASS_CODE
;
7401 tp
->key
[1] = (u64
)l2_rdc_tab
<< TCAM_V4KEY1_L2RDCNUM_SHIFT
;
7402 tp
->key_mask
[1] = TCAM_V4KEY1_L2RDCNUM
;
7404 tp
->key
[3] = (u64
)sip
<< TCAM_V4KEY3_SADDR_SHIFT
;
7407 tp
->key_mask
[3] = (u64
)sipm
<< TCAM_V4KEY3_SADDR_SHIFT
;
7408 tp
->key_mask
[3] |= dipm
;
7410 tp
->key
[2] |= ((u64
)fsp
->h_u
.tcp_ip4_spec
.tos
<<
7411 TCAM_V4KEY2_TOS_SHIFT
);
7412 tp
->key_mask
[2] |= ((u64
)fsp
->m_u
.tcp_ip4_spec
.tos
<<
7413 TCAM_V4KEY2_TOS_SHIFT
);
7414 switch (fsp
->flow_type
) {
7418 sport
= be16_to_cpu(fsp
->h_u
.tcp_ip4_spec
.psrc
);
7419 spm
= be16_to_cpu(fsp
->m_u
.tcp_ip4_spec
.psrc
);
7420 dport
= be16_to_cpu(fsp
->h_u
.tcp_ip4_spec
.pdst
);
7421 dpm
= be16_to_cpu(fsp
->m_u
.tcp_ip4_spec
.pdst
);
7423 tp
->key
[2] |= (((u64
)sport
<< 16) | dport
);
7424 tp
->key_mask
[2] |= (((u64
)spm
<< 16) | dpm
);
7425 niu_ethflow_to_l3proto(fsp
->flow_type
, &pid
);
7429 spi
= be32_to_cpu(fsp
->h_u
.ah_ip4_spec
.spi
);
7430 spim
= be32_to_cpu(fsp
->m_u
.ah_ip4_spec
.spi
);
7433 tp
->key_mask
[2] |= spim
;
7434 niu_ethflow_to_l3proto(fsp
->flow_type
, &pid
);
7437 spi
= be32_to_cpu(fsp
->h_u
.usr_ip4_spec
.l4_4_bytes
);
7438 spim
= be32_to_cpu(fsp
->m_u
.usr_ip4_spec
.l4_4_bytes
);
7441 tp
->key_mask
[2] |= spim
;
7442 pid
= fsp
->h_u
.usr_ip4_spec
.proto
;
7448 tp
->key
[2] |= ((u64
)pid
<< TCAM_V4KEY2_PROTO_SHIFT
);
7450 tp
->key_mask
[2] |= TCAM_V4KEY2_PROTO
;
7454 static int niu_add_ethtool_tcam_entry(struct niu
*np
,
7455 struct ethtool_rxnfc
*nfc
)
7457 struct niu_parent
*parent
= np
->parent
;
7458 struct niu_tcam_entry
*tp
;
7459 struct ethtool_rx_flow_spec
*fsp
= &nfc
->fs
;
7460 struct niu_rdc_tables
*rdc_table
= &parent
->rdc_group_cfg
[np
->port
];
7461 int l2_rdc_table
= rdc_table
->first_table_num
;
7464 unsigned long flags
;
7469 idx
= nfc
->fs
.location
;
7470 if (idx
>= tcam_get_size(np
))
7473 if (fsp
->flow_type
== IP_USER_FLOW
) {
7475 int add_usr_cls
= 0;
7476 struct ethtool_usrip4_spec
*uspec
= &fsp
->h_u
.usr_ip4_spec
;
7477 struct ethtool_usrip4_spec
*umask
= &fsp
->m_u
.usr_ip4_spec
;
7479 if (uspec
->ip_ver
!= ETH_RX_NFC_IP4
)
7482 niu_lock_parent(np
, flags
);
7484 for (i
= 0; i
< NIU_L3_PROG_CLS
; i
++) {
7485 if (parent
->l3_cls
[i
]) {
7486 if (uspec
->proto
== parent
->l3_cls_pid
[i
]) {
7487 class = parent
->l3_cls
[i
];
7488 parent
->l3_cls_refcnt
[i
]++;
7493 /* Program new user IP class */
7496 class = CLASS_CODE_USER_PROG1
;
7499 class = CLASS_CODE_USER_PROG2
;
7502 class = CLASS_CODE_USER_PROG3
;
7505 class = CLASS_CODE_USER_PROG4
;
7510 ret
= tcam_user_ip_class_set(np
, class, 0,
7517 ret
= tcam_user_ip_class_enable(np
, class, 1);
7520 parent
->l3_cls
[i
] = class;
7521 parent
->l3_cls_pid
[i
] = uspec
->proto
;
7522 parent
->l3_cls_refcnt
[i
]++;
7528 netdev_info(np
->dev
, "niu%d: %s(): Could not find/insert class for pid %d\n",
7529 parent
->index
, __func__
, uspec
->proto
);
7533 niu_unlock_parent(np
, flags
);
7535 if (!niu_ethflow_to_class(fsp
->flow_type
, &class)) {
7540 niu_lock_parent(np
, flags
);
7542 idx
= tcam_get_index(np
, idx
);
7543 tp
= &parent
->tcam
[idx
];
7545 memset(tp
, 0, sizeof(*tp
));
7547 /* fill in the tcam key and mask */
7548 switch (fsp
->flow_type
) {
7554 niu_get_tcamkey_from_ip4fs(fsp
, tp
, l2_rdc_table
, class);
7561 /* Not yet implemented */
7562 netdev_info(np
->dev
, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
7563 parent
->index
, __func__
, fsp
->flow_type
);
7567 niu_get_tcamkey_from_ip4fs(fsp
, tp
, l2_rdc_table
, class);
7570 netdev_info(np
->dev
, "niu%d: In %s(): Unknown flow type %d\n",
7571 parent
->index
, __func__
, fsp
->flow_type
);
7576 /* fill in the assoc data */
7577 if (fsp
->ring_cookie
== RX_CLS_FLOW_DISC
) {
7578 tp
->assoc_data
= TCAM_ASSOCDATA_DISC
;
7580 if (fsp
->ring_cookie
>= np
->num_rx_rings
) {
7581 netdev_info(np
->dev
, "niu%d: In %s(): Invalid RX ring %lld\n",
7582 parent
->index
, __func__
,
7583 (long long)fsp
->ring_cookie
);
7587 tp
->assoc_data
= (TCAM_ASSOCDATA_TRES_USE_OFFSET
|
7588 (fsp
->ring_cookie
<<
7589 TCAM_ASSOCDATA_OFFSET_SHIFT
));
7592 err
= tcam_write(np
, idx
, tp
->key
, tp
->key_mask
);
7597 err
= tcam_assoc_write(np
, idx
, tp
->assoc_data
);
7603 /* validate the entry */
7605 np
->clas
.tcam_valid_entries
++;
7607 niu_unlock_parent(np
, flags
);
7612 static int niu_del_ethtool_tcam_entry(struct niu
*np
, u32 loc
)
7614 struct niu_parent
*parent
= np
->parent
;
7615 struct niu_tcam_entry
*tp
;
7617 unsigned long flags
;
7621 if (loc
>= tcam_get_size(np
))
7624 niu_lock_parent(np
, flags
);
7626 idx
= tcam_get_index(np
, loc
);
7627 tp
= &parent
->tcam
[idx
];
7629 /* if the entry is of a user defined class, then update*/
7630 class = (tp
->key
[0] & TCAM_V4KEY0_CLASS_CODE
) >>
7631 TCAM_V4KEY0_CLASS_CODE_SHIFT
;
7633 if (class >= CLASS_CODE_USER_PROG1
&& class <= CLASS_CODE_USER_PROG4
) {
7635 for (i
= 0; i
< NIU_L3_PROG_CLS
; i
++) {
7636 if (parent
->l3_cls
[i
] == class) {
7637 parent
->l3_cls_refcnt
[i
]--;
7638 if (!parent
->l3_cls_refcnt
[i
]) {
7640 ret
= tcam_user_ip_class_enable(np
,
7645 parent
->l3_cls
[i
] = 0;
7646 parent
->l3_cls_pid
[i
] = 0;
7651 if (i
== NIU_L3_PROG_CLS
) {
7652 netdev_info(np
->dev
, "niu%d: In %s(): Usr class 0x%llx not found\n",
7653 parent
->index
, __func__
,
7654 (unsigned long long)class);
7660 ret
= tcam_flush(np
, idx
);
7664 /* invalidate the entry */
7666 np
->clas
.tcam_valid_entries
--;
7668 niu_unlock_parent(np
, flags
);
7673 static int niu_set_nfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
7675 struct niu
*np
= netdev_priv(dev
);
7680 ret
= niu_set_hash_opts(np
, cmd
);
7682 case ETHTOOL_SRXCLSRLINS
:
7683 ret
= niu_add_ethtool_tcam_entry(np
, cmd
);
7685 case ETHTOOL_SRXCLSRLDEL
:
7686 ret
= niu_del_ethtool_tcam_entry(np
, cmd
->fs
.location
);
7696 static const struct {
7697 const char string
[ETH_GSTRING_LEN
];
7698 } niu_xmac_stat_keys
[] = {
7701 { "tx_fifo_errors" },
7702 { "tx_overflow_errors" },
7703 { "tx_max_pkt_size_errors" },
7704 { "tx_underflow_errors" },
7705 { "rx_local_faults" },
7706 { "rx_remote_faults" },
7707 { "rx_link_faults" },
7708 { "rx_align_errors" },
7720 { "rx_code_violations" },
7721 { "rx_len_errors" },
7722 { "rx_crc_errors" },
7723 { "rx_underflows" },
7725 { "pause_off_state" },
7726 { "pause_on_state" },
7727 { "pause_received" },
7730 #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys)
7732 static const struct {
7733 const char string
[ETH_GSTRING_LEN
];
7734 } niu_bmac_stat_keys
[] = {
7735 { "tx_underflow_errors" },
7736 { "tx_max_pkt_size_errors" },
7741 { "rx_align_errors" },
7742 { "rx_crc_errors" },
7743 { "rx_len_errors" },
7744 { "pause_off_state" },
7745 { "pause_on_state" },
7746 { "pause_received" },
7749 #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys)
7751 static const struct {
7752 const char string
[ETH_GSTRING_LEN
];
7753 } niu_rxchan_stat_keys
[] = {
7761 #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys)
7763 static const struct {
7764 const char string
[ETH_GSTRING_LEN
];
7765 } niu_txchan_stat_keys
[] = {
7772 #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys)
7774 static void niu_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
7776 struct niu
*np
= netdev_priv(dev
);
7779 if (stringset
!= ETH_SS_STATS
)
7782 if (np
->flags
& NIU_FLAGS_XMAC
) {
7783 memcpy(data
, niu_xmac_stat_keys
,
7784 sizeof(niu_xmac_stat_keys
));
7785 data
+= sizeof(niu_xmac_stat_keys
);
7787 memcpy(data
, niu_bmac_stat_keys
,
7788 sizeof(niu_bmac_stat_keys
));
7789 data
+= sizeof(niu_bmac_stat_keys
);
7791 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
7792 memcpy(data
, niu_rxchan_stat_keys
,
7793 sizeof(niu_rxchan_stat_keys
));
7794 data
+= sizeof(niu_rxchan_stat_keys
);
7796 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
7797 memcpy(data
, niu_txchan_stat_keys
,
7798 sizeof(niu_txchan_stat_keys
));
7799 data
+= sizeof(niu_txchan_stat_keys
);
7803 static int niu_get_sset_count(struct net_device
*dev
, int stringset
)
7805 struct niu
*np
= netdev_priv(dev
);
7807 if (stringset
!= ETH_SS_STATS
)
7810 return (np
->flags
& NIU_FLAGS_XMAC
?
7811 NUM_XMAC_STAT_KEYS
:
7812 NUM_BMAC_STAT_KEYS
) +
7813 (np
->num_rx_rings
* NUM_RXCHAN_STAT_KEYS
) +
7814 (np
->num_tx_rings
* NUM_TXCHAN_STAT_KEYS
);
7817 static void niu_get_ethtool_stats(struct net_device
*dev
,
7818 struct ethtool_stats
*stats
, u64
*data
)
7820 struct niu
*np
= netdev_priv(dev
);
7823 niu_sync_mac_stats(np
);
7824 if (np
->flags
& NIU_FLAGS_XMAC
) {
7825 memcpy(data
, &np
->mac_stats
.xmac
,
7826 sizeof(struct niu_xmac_stats
));
7827 data
+= (sizeof(struct niu_xmac_stats
) / sizeof(u64
));
7829 memcpy(data
, &np
->mac_stats
.bmac
,
7830 sizeof(struct niu_bmac_stats
));
7831 data
+= (sizeof(struct niu_bmac_stats
) / sizeof(u64
));
7833 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
7834 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
7836 niu_sync_rx_discard_stats(np
, rp
, 0);
7838 data
[0] = rp
->rx_channel
;
7839 data
[1] = rp
->rx_packets
;
7840 data
[2] = rp
->rx_bytes
;
7841 data
[3] = rp
->rx_dropped
;
7842 data
[4] = rp
->rx_errors
;
7845 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
7846 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
7848 data
[0] = rp
->tx_channel
;
7849 data
[1] = rp
->tx_packets
;
7850 data
[2] = rp
->tx_bytes
;
7851 data
[3] = rp
->tx_errors
;
7856 static u64
niu_led_state_save(struct niu
*np
)
7858 if (np
->flags
& NIU_FLAGS_XMAC
)
7859 return nr64_mac(XMAC_CONFIG
);
7861 return nr64_mac(BMAC_XIF_CONFIG
);
7864 static void niu_led_state_restore(struct niu
*np
, u64 val
)
7866 if (np
->flags
& NIU_FLAGS_XMAC
)
7867 nw64_mac(XMAC_CONFIG
, val
);
7869 nw64_mac(BMAC_XIF_CONFIG
, val
);
7872 static void niu_force_led(struct niu
*np
, int on
)
7876 if (np
->flags
& NIU_FLAGS_XMAC
) {
7878 bit
= XMAC_CONFIG_FORCE_LED_ON
;
7880 reg
= BMAC_XIF_CONFIG
;
7881 bit
= BMAC_XIF_CONFIG_LINK_LED
;
7884 val
= nr64_mac(reg
);
7892 static int niu_phys_id(struct net_device
*dev
, u32 data
)
7894 struct niu
*np
= netdev_priv(dev
);
7898 if (!netif_running(dev
))
7904 orig_led_state
= niu_led_state_save(np
);
7905 for (i
= 0; i
< (data
* 2); i
++) {
7906 int on
= ((i
% 2) == 0);
7908 niu_force_led(np
, on
);
7910 if (msleep_interruptible(500))
7913 niu_led_state_restore(np
, orig_led_state
);
7918 static int niu_set_flags(struct net_device
*dev
, u32 data
)
7920 return ethtool_op_set_flags(dev
, data
, ETH_FLAG_RXHASH
);
7923 static const struct ethtool_ops niu_ethtool_ops
= {
7924 .get_drvinfo
= niu_get_drvinfo
,
7925 .get_link
= ethtool_op_get_link
,
7926 .get_msglevel
= niu_get_msglevel
,
7927 .set_msglevel
= niu_set_msglevel
,
7928 .nway_reset
= niu_nway_reset
,
7929 .get_eeprom_len
= niu_get_eeprom_len
,
7930 .get_eeprom
= niu_get_eeprom
,
7931 .get_settings
= niu_get_settings
,
7932 .set_settings
= niu_set_settings
,
7933 .get_strings
= niu_get_strings
,
7934 .get_sset_count
= niu_get_sset_count
,
7935 .get_ethtool_stats
= niu_get_ethtool_stats
,
7936 .phys_id
= niu_phys_id
,
7937 .get_rxnfc
= niu_get_nfc
,
7938 .set_rxnfc
= niu_set_nfc
,
7939 .set_flags
= niu_set_flags
,
7940 .get_flags
= ethtool_op_get_flags
,
7943 static int niu_ldg_assign_ldn(struct niu
*np
, struct niu_parent
*parent
,
7946 if (ldg
< NIU_LDG_MIN
|| ldg
> NIU_LDG_MAX
)
7948 if (ldn
< 0 || ldn
> LDN_MAX
)
7951 parent
->ldg_map
[ldn
] = ldg
;
7953 if (np
->parent
->plat_type
== PLAT_TYPE_NIU
) {
7954 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
7955 * the firmware, and we're not supposed to change them.
7956 * Validate the mapping, because if it's wrong we probably
7957 * won't get any interrupts and that's painful to debug.
7959 if (nr64(LDG_NUM(ldn
)) != ldg
) {
7960 dev_err(np
->device
, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
7962 (unsigned long long) nr64(LDG_NUM(ldn
)));
7966 nw64(LDG_NUM(ldn
), ldg
);
7971 static int niu_set_ldg_timer_res(struct niu
*np
, int res
)
7973 if (res
< 0 || res
> LDG_TIMER_RES_VAL
)
7977 nw64(LDG_TIMER_RES
, res
);
7982 static int niu_set_ldg_sid(struct niu
*np
, int ldg
, int func
, int vector
)
7984 if ((ldg
< NIU_LDG_MIN
|| ldg
> NIU_LDG_MAX
) ||
7985 (func
< 0 || func
> 3) ||
7986 (vector
< 0 || vector
> 0x1f))
7989 nw64(SID(ldg
), (func
<< SID_FUNC_SHIFT
) | vector
);
7994 static int __devinit
niu_pci_eeprom_read(struct niu
*np
, u32 addr
)
7996 u64 frame
, frame_base
= (ESPC_PIO_STAT_READ_START
|
7997 (addr
<< ESPC_PIO_STAT_ADDR_SHIFT
));
8000 if (addr
> (ESPC_PIO_STAT_ADDR
>> ESPC_PIO_STAT_ADDR_SHIFT
))
8004 nw64(ESPC_PIO_STAT
, frame
);
8008 frame
= nr64(ESPC_PIO_STAT
);
8009 if (frame
& ESPC_PIO_STAT_READ_END
)
8012 if (!(frame
& ESPC_PIO_STAT_READ_END
)) {
8013 dev_err(np
->device
, "EEPROM read timeout frame[%llx]\n",
8014 (unsigned long long) frame
);
8019 nw64(ESPC_PIO_STAT
, frame
);
8023 frame
= nr64(ESPC_PIO_STAT
);
8024 if (frame
& ESPC_PIO_STAT_READ_END
)
8027 if (!(frame
& ESPC_PIO_STAT_READ_END
)) {
8028 dev_err(np
->device
, "EEPROM read timeout frame[%llx]\n",
8029 (unsigned long long) frame
);
8033 frame
= nr64(ESPC_PIO_STAT
);
8034 return (frame
& ESPC_PIO_STAT_DATA
) >> ESPC_PIO_STAT_DATA_SHIFT
;
8037 static int __devinit
niu_pci_eeprom_read16(struct niu
*np
, u32 off
)
8039 int err
= niu_pci_eeprom_read(np
, off
);
8045 err
= niu_pci_eeprom_read(np
, off
+ 1);
8048 val
|= (err
& 0xff);
8053 static int __devinit
niu_pci_eeprom_read16_swp(struct niu
*np
, u32 off
)
8055 int err
= niu_pci_eeprom_read(np
, off
);
8062 err
= niu_pci_eeprom_read(np
, off
+ 1);
8066 val
|= (err
& 0xff) << 8;
8071 static int __devinit
niu_pci_vpd_get_propname(struct niu
*np
,
8078 for (i
= 0; i
< namebuf_len
; i
++) {
8079 int err
= niu_pci_eeprom_read(np
, off
+ i
);
8086 if (i
>= namebuf_len
)
8092 static void __devinit
niu_vpd_parse_version(struct niu
*np
)
8094 struct niu_vpd
*vpd
= &np
->vpd
;
8095 int len
= strlen(vpd
->version
) + 1;
8096 const char *s
= vpd
->version
;
8099 for (i
= 0; i
< len
- 5; i
++) {
8100 if (!strncmp(s
+ i
, "FCode ", 6))
8107 sscanf(s
, "%d.%d", &vpd
->fcode_major
, &vpd
->fcode_minor
);
8109 netif_printk(np
, probe
, KERN_DEBUG
, np
->dev
,
8110 "VPD_SCAN: FCODE major(%d) minor(%d)\n",
8111 vpd
->fcode_major
, vpd
->fcode_minor
);
8112 if (vpd
->fcode_major
> NIU_VPD_MIN_MAJOR
||
8113 (vpd
->fcode_major
== NIU_VPD_MIN_MAJOR
&&
8114 vpd
->fcode_minor
>= NIU_VPD_MIN_MINOR
))
8115 np
->flags
|= NIU_FLAGS_VPD_VALID
;
/* ESPC_PIO_EN_ENABLE must be set */
static int __devinit niu_pci_vpd_scan_props(struct niu *np,
					    u32 start, u32 end)
{
	unsigned int found_mask = 0;
#define FOUND_MASK_MODEL	0x00000001
#define FOUND_MASK_BMODEL	0x00000002
#define FOUND_MASK_VERS		0x00000004
#define FOUND_MASK_MAC		0x00000008
#define FOUND_MASK_NMAC		0x00000010
#define FOUND_MASK_PHY		0x00000020
#define FOUND_MASK_ALL		0x0000003f

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
	while (start < end) {
		int len, err, instance, type, prop_len;
		char namebuf[64];
		u8 *prop_buf;
		int max_len;

		if (found_mask == FOUND_MASK_ALL) {
			niu_vpd_parse_version(np);
			return 1;
		}

		err = niu_pci_eeprom_read(np, start + 2);
		if (err < 0)
			return err;
		len = err;
		start += 3;

		instance = niu_pci_eeprom_read(np, start);
		type = niu_pci_eeprom_read(np, start + 3);
		prop_len = niu_pci_eeprom_read(np, start + 4);
		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
		if (err < 0)
			return err;

		prop_buf = NULL;
		max_len = 0;
		if (!strcmp(namebuf, "model")) {
			prop_buf = np->vpd.model;
			max_len = NIU_VPD_MODEL_MAX;
			found_mask |= FOUND_MASK_MODEL;
		} else if (!strcmp(namebuf, "board-model")) {
			prop_buf = np->vpd.board_model;
			max_len = NIU_VPD_BD_MODEL_MAX;
			found_mask |= FOUND_MASK_BMODEL;
		} else if (!strcmp(namebuf, "version")) {
			prop_buf = np->vpd.version;
			max_len = NIU_VPD_VERSION_MAX;
			found_mask |= FOUND_MASK_VERS;
		} else if (!strcmp(namebuf, "local-mac-address")) {
			prop_buf = np->vpd.local_mac;
			max_len = ETH_ALEN;
			found_mask |= FOUND_MASK_MAC;
		} else if (!strcmp(namebuf, "num-mac-addresses")) {
			prop_buf = &np->vpd.mac_num;
			max_len = 1;
			found_mask |= FOUND_MASK_NMAC;
		} else if (!strcmp(namebuf, "phy-type")) {
			prop_buf = np->vpd.phy_type;
			max_len = NIU_VPD_PHY_TYPE_MAX;
			found_mask |= FOUND_MASK_PHY;
		}

		if (max_len && prop_len > max_len) {
			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
			return -EINVAL;
		}

		if (prop_buf) {
			u32 off = start + 5 + err;
			int i;

			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
				     namebuf, prop_len);
			for (i = 0; i < prop_len; i++)
				*prop_buf++ = niu_pci_eeprom_read(np, off + i);
		}

		start += len;
	}

	return 0;
}
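/* The scan above finishes early (returning 1) once all six properties
 * tracked in found_mask have been seen; only then is the "version"
 * string parsed for an FCode revision.
 */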
/* ESPC_PIO_EN_ENABLE must be set */
static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
{
	u32 offset;
	int err;

	err = niu_pci_eeprom_read16_swp(np, start + 1);
	if (err < 0)
		return;

	offset = err + 3;

	while (start + offset < ESPC_EEPROM_SIZE) {
		u32 here = start + offset;
		u32 end;

		err = niu_pci_eeprom_read(np, here);
		if (err != 0x90)
			return;

		err = niu_pci_eeprom_read16_swp(np, here + 1);
		if (err < 0)
			return;

		here = start + offset + 3;
		end = start + offset + err;

		offset += err;

		err = niu_pci_vpd_scan_props(np, here, end);
		if (err < 0 || err == 1)
			return;
	}
}
/* ESPC_PIO_EN_ENABLE must be set */
static u32 __devinit niu_pci_vpd_offset(struct niu *np)
{
	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
	int err;

	while (start < end) {
		ret = start;

		/* ROM header signature?  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x55aa)
			return 0;

		/* Apply offset to PCI data structure.  */
		err = niu_pci_eeprom_read16(np, start + 23);
		if (err < 0)
			return 0;
		start += err;

		/* Check for "PCIR" signature.  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x5043)
			return 0;
		err = niu_pci_eeprom_read16(np, start +  2);
		if (err != 0x4952)
			return 0;

		/* Check for OBP image type.  */
		err = niu_pci_eeprom_read(np, start + 20);
		if (err < 0)
			return 0;
		if (err != 0x01) {
			err = niu_pci_eeprom_read(np, ret + 2);
			if (err < 0)
				return 0;

			start = ret + (err * 512);
			continue;
		}

		err = niu_pci_eeprom_read16_swp(np, start + 8);
		if (err < 0)
			return 0;
		ret += err;

		err = niu_pci_eeprom_read(np, ret + 0);
		if (err != 0x82)
			return 0;

		return ret;
	}

	return 0;
}
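/* Rough sketch of the walk above, following the usual PCI expansion ROM
 * layout: each image starts with the 0x55aa signature and points to a
 * "PCIR" data structure; if the image's code type is not 0x01 (Open
 * Firmware), the length read at ret + 2 (in 512-byte units) is used to
 * hop to the next image, until an OBP image carrying a VPD tag (0x82)
 * is found and its offset returned.
 */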
static int __devinit niu_phy_type_prop_decode(struct niu *np,
					      const char *phy_prop)
{
	if (!strcmp(phy_prop, "mif")) {
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
	} else if (!strcmp(phy_prop, "xgf")) {
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "pcs")) {
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
	} else if (!strcmp(phy_prop, "xgc")) {
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
		/* 10G Serdes or 1G Serdes, default to 10G */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else {
		return -EINVAL;
	}
	return 0;
}
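/* Summary of the phy-type strings handled above:
 *	"mif"		- 1G copper, MII
 *	"xgf"		- 10G fiber, XPCS
 *	"pcs"		- 1G fiber, PCS
 *	"xgc"		- 10G copper, XPCS
 *	"xgsd"/"gsd"	- SERDES, defaulting to 10G, XPCS
 */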
static int niu_pci_vpd_get_nports(struct niu *np)
{
	int ports = 0;

	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
		ports = 4;
	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
		ports = 2;
	}

	return ports;
}
static void __devinit niu_pci_vpd_validate(struct niu *np)
{
	struct net_device *dev = np->dev;
	struct niu_vpd *vpd = &np->vpd;
	u8 val8;

	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");

		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_PCS;
		if (np->port > 1) {
			np->flags |= NIU_FLAGS_FIBER;
			np->flags &= ~NIU_FLAGS_10G;
		}
		if (np->flags & NIU_FLAGS_10G)
			np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, "Illegal phy string [%s]\n",
			np->vpd.phy_type);
		dev_err(np->device, "Falling back to SPROM\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);

	val8 = dev->perm_addr[5];
	dev->perm_addr[5] += np->port;
	if (dev->perm_addr[5] < val8)
		dev->perm_addr[4]++;

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
}
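/* Example of the per-port MAC derivation above (illustrative values
 * only): with a base address of 00:14:4f:6c:a2:fe, port 2 ends up with
 * 00:14:4f:6c:a3:00 - adding the port number wraps perm_addr[5], so
 * perm_addr[4] is bumped to carry.
 */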
static int __devinit niu_pci_probe_sprom(struct niu *np)
{
	struct net_device *dev = np->dev;
	int len, i;
	u64 val, sum;
	u8 val8;

	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
	len = (val / 4) + 3;

	np->eeprom_len = len;

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Image size %llu\n", (unsigned long long)val);

	sum = 0;
	for (i = 0; i < len; i++) {
		val = nr64(ESPC_NCR(i));
		sum += (val >>  0) & 0xff;
		sum += (val >>  8) & 0xff;
		sum += (val >> 16) & 0xff;
		sum += (val >> 24) & 0xff;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
	if ((sum & 0xff) != 0xab) {
		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
		return -EINVAL;
	}

	val = nr64(ESPC_PHY_TYPE);
	switch (np->port) {
	case 0:
		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
			ESPC_PHY_TYPE_PORT0_SHIFT;
		break;
	case 1:
		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
			ESPC_PHY_TYPE_PORT1_SHIFT;
		break;
	case 2:
		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
			ESPC_PHY_TYPE_PORT2_SHIFT;
		break;
	case 3:
		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
			ESPC_PHY_TYPE_PORT3_SHIFT;
		break;
	default:
		dev_err(np->device, "Bogus port number %u\n",
			np->port);
		return -EINVAL;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: PHY type %x\n", val8);

	switch (val8) {
	case ESPC_PHY_TYPE_1G_COPPER:
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
		break;

	case ESPC_PHY_TYPE_1G_FIBER:
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
		break;

	case ESPC_PHY_TYPE_10G_COPPER:
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	case ESPC_PHY_TYPE_10G_FIBER:
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	default:
		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
		return -EINVAL;
	}

	val = nr64(ESPC_MAC_ADDR0);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
	dev->perm_addr[0] = (val >>  0) & 0xff;
	dev->perm_addr[1] = (val >>  8) & 0xff;
	dev->perm_addr[2] = (val >> 16) & 0xff;
	dev->perm_addr[3] = (val >> 24) & 0xff;

	val = nr64(ESPC_MAC_ADDR1);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
	dev->perm_addr[4] = (val >>  0) & 0xff;
	dev->perm_addr[5] = (val >>  8) & 0xff;

	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
			dev->perm_addr);
		return -EINVAL;
	}

	val8 = dev->perm_addr[5];
	dev->perm_addr[5] += np->port;
	if (dev->perm_addr[5] < val8)
		dev->perm_addr[4]++;

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);

	val = nr64(ESPC_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 8 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));

		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.model[val] = '\0';

	val = nr64(ESPC_BD_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 4 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));

		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.board_model[val] = '\0';

	np->vpd.mac_num =
		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);

	return 0;
}
static int __devinit niu_get_and_validate_port(struct niu *np)
{
	struct niu_parent *parent = np->parent;

	if (np->port <= 1)
		np->flags |= NIU_FLAGS_XMAC;

	if (!parent->num_ports) {
		if (parent->plat_type == PLAT_TYPE_NIU) {
			parent->num_ports = 2;
		} else {
			parent->num_ports = niu_pci_vpd_get_nports(np);
			if (!parent->num_ports) {
				/* Fall back to SPROM as last resort.
				 * This will fail on most cards.
				 */
				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
					ESPC_NUM_PORTS_MACS_VAL;

				/* All of the current probing methods fail on
				 * Maramba on-board parts.
				 */
				if (!parent->num_ports)
					parent->num_ports = 4;
			}
		}
	}

	if (np->port >= parent->num_ports)
		return -ENODEV;

	return 0;
}
static int __devinit phy_record(struct niu_parent *parent,
				struct phy_probe_info *p,
				int dev_id_1, int dev_id_2, u8 phy_port,
				int type)
{
	u32 id = (dev_id_1 << 16) | dev_id_2;
	u8 idx;

	if (dev_id_1 < 0 || dev_id_2 < 0)
		return 0;
	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
			return 0;
	} else {
		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
			return 0;
	}

	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
		parent->index, id,
		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
		type == PHY_TYPE_PCS ? "PCS" : "MII",
		phy_port);

	if (p->cur[type] >= NIU_MAX_PORTS) {
		pr_err("Too many PHY ports\n");
		return -EINVAL;
	}
	idx = p->cur[type];
	p->phy_id[type][idx] = id;
	p->phy_port[type][idx] = phy_port;
	p->cur[type] = idx + 1;

	return 0;
}
static int __devinit port_has_10g(struct phy_probe_info *p, int port)
{
	int i;

	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
			return 1;
	}
	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
		if (p->phy_port[PHY_TYPE_PCS][i] == port)
			return 1;
	}

	return 0;
}

static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
{
	int port, cnt;

	cnt = 0;
	*lowest = 32;
	for (port = 8; port < 32; port++) {
		if (port_has_10g(p, port)) {
			if (!cnt)
				*lowest = port;
			cnt++;
		}
	}

	return cnt;
}

static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
{
	*lowest = 32;
	if (p->cur[PHY_TYPE_MII])
		*lowest = p->phy_port[PHY_TYPE_MII][0];

	return p->cur[PHY_TYPE_MII];
}
static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
{
	int num_ports = parent->num_ports;
	int i;

	for (i = 0; i < num_ports; i++) {
		parent->rxchan_per_port[i] = (16 / num_ports);
		parent->txchan_per_port[i] = (16 / num_ports);

		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
	}
}
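/* On N2/NIU the 16 channels are simply split evenly: two ports get
 * 8 RX and 8 TX channels each, four ports get 4 each.
 */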
static void __devinit niu_divide_channels(struct niu_parent *parent,
					  int num_10g, int num_1g)
{
	int num_ports = parent->num_ports;
	int rx_chans_per_10g, rx_chans_per_1g;
	int tx_chans_per_10g, tx_chans_per_1g;
	int i, tot_rx, tot_tx;

	if (!num_10g || !num_1g) {
		rx_chans_per_10g = rx_chans_per_1g =
			(NIU_NUM_RXCHAN / num_ports);
		tx_chans_per_10g = tx_chans_per_1g =
			(NIU_NUM_TXCHAN / num_ports);
	} else {
		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
		rx_chans_per_10g = (NIU_NUM_RXCHAN -
				    (rx_chans_per_1g * num_1g)) /
			num_10g;

		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
		tx_chans_per_10g = (NIU_NUM_TXCHAN -
				    (tx_chans_per_1g * num_1g)) /
			num_10g;
	}

	tot_rx = tot_tx = 0;
	for (i = 0; i < num_ports; i++) {
		int type = phy_decode(parent->port_phy, i);

		if (type == PORT_TYPE_10G) {
			parent->rxchan_per_port[i] = rx_chans_per_10g;
			parent->txchan_per_port[i] = tx_chans_per_10g;
		} else {
			parent->rxchan_per_port[i] = rx_chans_per_1g;
			parent->txchan_per_port[i] = tx_chans_per_1g;
		}
		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
		tot_rx += parent->rxchan_per_port[i];
		tot_tx += parent->txchan_per_port[i];
	}

	if (tot_rx > NIU_NUM_RXCHAN) {
		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
		       parent->index, tot_rx);
		for (i = 0; i < num_ports; i++)
			parent->rxchan_per_port[i] = 1;
	}
	if (tot_tx > NIU_NUM_TXCHAN) {
		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
		       parent->index, tot_tx);
		for (i = 0; i < num_ports; i++)
			parent->txchan_per_port[i] = 1;
	}
	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
		pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
			   parent->index, tot_rx, tot_tx);
	}
}
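/* Worked example for the mixed case above, assuming the usual totals of
 * NIU_NUM_RXCHAN = 16 and NIU_NUM_TXCHAN = 24: with two 10G and two 1G
 * ports, each 1G port gets 16/8 = 2 RX and 24/6 = 4 TX channels, and
 * each 10G port gets (16 - 4)/2 = 6 RX and (24 - 8)/2 = 8 TX channels,
 * consuming all 16 RX and 24 TX channels.
 */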
static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
					    int num_10g, int num_1g)
{
	int i, num_ports = parent->num_ports;
	int rdc_group, rdc_groups_per_port;
	int rdc_channel_base;

	rdc_group = 0;
	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;

	rdc_channel_base = 0;

	for (i = 0; i < num_ports; i++) {
		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
		int grp, num_channels = parent->rxchan_per_port[i];
		int this_channel_offset;

		tp->first_table_num = rdc_group;
		tp->num_tables = rdc_groups_per_port;
		this_channel_offset = 0;
		for (grp = 0; grp < tp->num_tables; grp++) {
			struct rdc_table *rt = &tp->tables[grp];
			int slot;

			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
				parent->index, i, tp->first_table_num + grp);
			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
				rt->rxdma_channel[slot] =
					rdc_channel_base + this_channel_offset;

				pr_cont("%d ", rt->rxdma_channel[slot]);

				if (++this_channel_offset == num_channels)
					this_channel_offset = 0;
			}
			pr_cont("]\n");
		}

		parent->rdc_default[i] = rdc_channel_base;

		rdc_channel_base += num_channels;
		rdc_group += rdc_groups_per_port;
	}
}
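/* Each port ends up with NIU_NUM_RDC_TABLES / num_ports RDC tables, and
 * every slot of those tables cycles through the port's own RX channels
 * starting at rdc_channel_base, which also becomes that port's default
 * RDC channel.
 */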
static int __devinit fill_phy_probe_info(struct niu *np,
					 struct niu_parent *parent,
					 struct phy_probe_info *info)
{
	unsigned long flags;
	int port, err;

	memset(info, 0, sizeof(*info));

	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
	niu_lock_parent(np, flags);
	err = 0;
	for (port = 8; port < 32; port++) {
		int dev_id_1, dev_id_2;

		dev_id_1 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PMA_PMD);
		if (err)
			break;
		dev_id_1 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PCS);
		if (err)
			break;
		dev_id_1 = mii_read(np, port, MII_PHYSID1);
		dev_id_2 = mii_read(np, port, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_MII);
		if (err)
			break;
	}
	niu_unlock_parent(np, flags);

	return err;
}
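/* MDIO ports 8-31 are probed three ways: clause-45 reads of the PMA/PMD
 * and PCS device identifier registers, then a plain clause-22 MII ID
 * read; phy_record() keeps whatever responds with a known Broadcom or
 * Marvell ID.
 */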
static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
{
	struct phy_probe_info *info = &parent->phy_probe_info;
	int lowest_10g, lowest_1g;
	int num_10g, num_1g;
	u32 val;
	int err;

	num_10g = num_1g = 0;

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		num_10g = 0;
		num_1g = 2;
		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
		parent->num_ports = 4;
		val = (phy_encode(PORT_TYPE_1G, 0) |
		       phy_encode(PORT_TYPE_1G, 1) |
		       phy_encode(PORT_TYPE_1G, 2) |
		       phy_encode(PORT_TYPE_1G, 3));
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		num_10g = 2;
		num_1g = 0;
		parent->num_ports = 2;
		val = (phy_encode(PORT_TYPE_10G, 0) |
		       phy_encode(PORT_TYPE_10G, 1));
	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
		   (parent->plat_type == PLAT_TYPE_NIU)) {
		/* this is the Monza case */
		if (np->flags & NIU_FLAGS_10G) {
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
		} else {
			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1));
		}
	} else {
		err = fill_phy_probe_info(np, parent, info);
		if (err)
			return err;

		num_10g = count_10g_ports(info, &lowest_10g);
		num_1g = count_1g_ports(info, &lowest_1g);

		switch ((num_10g << 4) | num_1g) {
		case 0x24:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x22:
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x20:
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
			break;

		case 0x10:
			val = phy_encode(PORT_TYPE_10G, np->port);
			break;

		case 0x14:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x13:
			if ((lowest_10g & 0x7) == 0)
				val = (phy_encode(PORT_TYPE_10G, 0) |
				       phy_encode(PORT_TYPE_1G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			else
				val = (phy_encode(PORT_TYPE_1G, 0) |
				       phy_encode(PORT_TYPE_10G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x04:
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		default:
			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
			       num_10g, num_1g);
			return -EINVAL;
		}
	}

	parent->port_phy = val;

	if (parent->plat_type == PLAT_TYPE_NIU)
		niu_n2_divide_channels(parent);
	else
		niu_divide_channels(parent, num_10g, num_1g);

	niu_divide_rdc_groups(parent, num_10g, num_1g);

	return 0;

unknown_vg_1g_port:
	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
	return -EINVAL;
}
static int __devinit niu_probe_ports(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, i;

	if (parent->port_phy == PORT_PHY_UNKNOWN) {
		err = walk_phys(np, parent);
		if (err)
			return err;

		niu_set_ldg_timer_res(np, 2);
		for (i = 0; i <= LDN_MAX; i++)
			niu_ldn_irq_enable(np, i, 0);
	}

	if (parent->port_phy == PORT_PHY_INVALID)
		return -EINVAL;

	return 0;
}
static int __devinit niu_classifier_swstate_init(struct niu *np)
{
	struct niu_classifier *cp = &np->clas;

	cp->tcam_top = (u16) np->port;
	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
	cp->h1_init = 0xffffffff;
	cp->h2_init = 0xffff;

	return fflp_early_init(np);
}

static void __devinit niu_link_config_init(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = (ADVERTISED_10baseT_Half |
			   ADVERTISED_10baseT_Full |
			   ADVERTISED_100baseT_Half |
			   ADVERTISED_100baseT_Full |
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full |
			   ADVERTISED_10000baseT_Full |
			   ADVERTISED_Autoneg);
	lp->speed = lp->active_speed = SPEED_INVALID;
	lp->duplex = DUPLEX_FULL;
	lp->active_duplex = DUPLEX_INVALID;

#if 0
	lp->loopback_mode = LOOPBACK_MAC;
	lp->active_speed = SPEED_10000;
	lp->active_duplex = DUPLEX_FULL;
#else
	lp->loopback_mode = LOOPBACK_DISABLED;
#endif
}
static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
{
	switch (np->port) {
	case 0:
		np->mac_regs = np->regs + XMAC_PORT0_OFF;
		np->ipp_off  = 0x00000;
		np->pcs_off  = 0x04000;
		np->xpcs_off = 0x02000;
		break;

	case 1:
		np->mac_regs = np->regs + XMAC_PORT1_OFF;
		np->ipp_off  = 0x08000;
		np->pcs_off  = 0x0a000;
		np->xpcs_off = 0x08000;
		break;

	case 2:
		np->mac_regs = np->regs + BMAC_PORT2_OFF;
		np->ipp_off  = 0x04000;
		np->pcs_off  = 0x0e000;
		np->xpcs_off = ~0UL;
		break;

	case 3:
		np->mac_regs = np->regs + BMAC_PORT3_OFF;
		np->ipp_off  = 0x0c000;
		np->pcs_off  = 0x12000;
		np->xpcs_off = ~0UL;
		break;

	default:
		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
		return -EINVAL;
	}

	return 0;
}
static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
{
	struct msix_entry msi_vec[NIU_NUM_LDG];
	struct niu_parent *parent = np->parent;
	struct pci_dev *pdev = np->pdev;
	int i, num_irqs, err;
	u8 first_ldg;

	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
		ldg_num_map[i] = first_ldg + i;

	num_irqs = (parent->rxchan_per_port[np->port] +
		    parent->txchan_per_port[np->port] +
		    (np->port == 0 ? 3 : 1));
	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));

retry:
	for (i = 0; i < num_irqs; i++) {
		msi_vec[i].vector = 0;
		msi_vec[i].entry = i;
	}

	err = pci_enable_msix(pdev, msi_vec, num_irqs);
	if (err < 0) {
		np->flags &= ~NIU_FLAGS_MSIX;
		return;
	}
	if (err > 0) {
		num_irqs = err;
		goto retry;
	}

	np->flags |= NIU_FLAGS_MSIX;
	for (i = 0; i < num_irqs; i++)
		np->ldg[i].irq = msi_vec[i].vector;
	np->num_ldg = num_irqs;
}
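/* The MSI-X budget above is one vector per RX channel and per TX
 * channel of this port, plus three extra on port 0 (MAC, MIF and
 * SYSERR) or one extra (MAC) on the other ports - the same ordering
 * niu_ldg_init() uses when assigning LDNs below.
 */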
static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
{
#ifdef CONFIG_SPARC64
	struct platform_device *op = np->op;
	const u32 *int_prop;
	int i;

	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
	if (!int_prop)
		return -ENODEV;

	for (i = 0; i < op->archdata.num_irqs; i++) {
		ldg_num_map[i] = int_prop[i];
		np->ldg[i].irq = op->archdata.irqs[i];
	}

	np->num_ldg = op->archdata.num_irqs;

	return 0;
#else
	return -EINVAL;
#endif
}
static int __devinit niu_ldg_init(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	u8 ldg_num_map[NIU_NUM_LDG];
	int first_chan, num_chan;
	int i, err, ldg_rotor;
	u8 port;

	np->num_ldg = 1;
	np->ldg[0].irq = np->dev->irq;
	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_n2_irq_init(np, ldg_num_map);
		if (err)
			return err;
	} else
		niu_try_msix(np, ldg_num_map);

	port = np->port;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);

		lp->np = np;
		lp->ldg_num = ldg_num_map[i];
		lp->timer = 2; /* XXX */

		/* On N2 NIU the firmware has setup the SID mappings so they go
		 * to the correct values that will route the LDG to the proper
		 * interrupt in the NCU interrupt table.
		 */
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
			if (err)
				return err;
		}
	}

	/* We adopt the LDG assignment ordering used by the N2 NIU
	 * 'interrupt' properties because that simplifies a lot of
	 * things.  This ordering is:
	 *
	 *	MAC
	 *	MIF	(if port zero)
	 *	SYSERR	(if port zero)
	 *	RX channels
	 *	TX channels
	 */

	ldg_rotor = 0;

	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
				 LDN_MAC(port));
	if (err)
		return err;

	ldg_rotor++;
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;

	if (port == 0) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_MIF);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;

		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_DEVICE_ERROR);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->rxchan_per_port[port];
	num_chan = parent->rxchan_per_port[port];

	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_RXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->txchan_per_port[port];
	num_chan = parent->txchan_per_port[port];
	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_TXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	return 0;
}

static void __devexit niu_ldg_free(struct niu *np)
{
	if (np->flags & NIU_FLAGS_MSIX)
		pci_disable_msix(np->pdev);
}
static int __devinit niu_get_of_props(struct niu *np)
{
#ifdef CONFIG_SPARC64
	struct net_device *dev = np->dev;
	struct device_node *dp;
	const char *phy_type;
	const u8 *mac_addr;
	const char *model;
	int prop_len;

	if (np->parent->plat_type == PLAT_TYPE_NIU)
		dp = np->op->dev.of_node;
	else
		dp = pci_device_to_OF_node(np->pdev);

	phy_type = of_get_property(dp, "phy-type", &prop_len);
	if (!phy_type) {
		netdev_err(dev, "%s: OF node lacks phy-type property\n",
			   dp->full_name);
		return -EINVAL;
	}

	if (!strcmp(phy_type, "none"))
		return -ENODEV;

	strcpy(np->vpd.phy_type, phy_type);

	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		netdev_err(dev, "%s: Illegal phy string [%s]\n",
			   dp->full_name, np->vpd.phy_type);
		return -EINVAL;
	}

	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
			   dp->full_name);
		return -EINVAL;
	}
	if (prop_len != dev->addr_len) {
		netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
			   dp->full_name, prop_len);
	}
	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		netdev_err(dev, "%s: OF MAC address is invalid\n",
			   dp->full_name);
		netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
		return -EINVAL;
	}

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);

	model = of_get_property(dp, "model", &prop_len);

	if (model)
		strcpy(np->vpd.model, model);

	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	}

	return 0;
#else
	return -EINVAL;
#endif
}
static int __devinit niu_get_invariants(struct niu *np)
{
	int err, have_props;
	u32 offset;

	err = niu_get_of_props(np);
	if (err == -ENODEV)
		return err;

	have_props = !err;

	err = niu_init_mac_ipp_pcs_base(np);
	if (err)
		return err;

	if (have_props) {
		err = niu_get_and_validate_port(np);
		if (err)
			return err;
	} else {
		if (np->parent->plat_type == PLAT_TYPE_NIU)
			return -EINVAL;

		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
		offset = niu_pci_vpd_offset(np);
		netif_printk(np, probe, KERN_DEBUG, np->dev,
			     "%s() VPD offset [%08x]\n", __func__, offset);
		if (offset)
			niu_pci_vpd_fetch(np, offset);
		nw64(ESPC_PIO_EN, 0);

		if (np->flags & NIU_FLAGS_VPD_VALID) {
			niu_pci_vpd_validate(np);
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
		}

		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
			err = niu_pci_probe_sprom(np);
			if (err)
				return err;
		}
	}

	err = niu_probe_ports(np);
	if (err)
		return err;

	niu_classifier_swstate_init(np);
	niu_link_config_init(np);

	err = niu_determine_phy_disposition(np);
	if (!err)
		err = niu_init_link(np);

	return err;
}

static LIST_HEAD(niu_parent_list);
static DEFINE_MUTEX(niu_parent_lock);
static int niu_parent_index;
static ssize_t show_port_phy(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	u32 port_phy = p->port_phy;
	char *orig_buf = buf;
	int i;

	if (port_phy == PORT_PHY_UNKNOWN ||
	    port_phy == PORT_PHY_INVALID)
		return 0;

	for (i = 0; i < p->num_ports; i++) {
		const char *type_str;
		int type;

		type = phy_decode(port_phy, i);
		if (type == PORT_TYPE_10G)
			type_str = "10G";
		else
			type_str = "1G";
		buf += sprintf(buf,
			       (i == 0) ? "%s" : " %s",
			       type_str);
	}
	buf += sprintf(buf, "\n");
	return buf - orig_buf;
}

static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	const char *type_str;

	switch (p->plat_type) {
	case PLAT_TYPE_ATLAS:
		type_str = "atlas";
		break;
	case PLAT_TYPE_NIU:
		type_str = "niu";
		break;
	case PLAT_TYPE_VF_P0:
		type_str = "vf_p0";
		break;
	case PLAT_TYPE_VF_P1:
		type_str = "vf_p1";
		break;
	default:
		type_str = "unknown";
		break;
	}

	return sprintf(buf, "%s\n", type_str);
}
static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf,
				    int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	char *orig_buf = buf;
	u8 *arr;
	int i;

	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}
static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 0);
}
static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;

	return sprintf(buf, "%d\n", p->num_ports);
}

static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
	{}
};
static struct niu_parent * __devinit niu_new_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct platform_device *plat_dev;
	struct niu_parent *p;
	int i;

	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
						   NULL, 0);
	if (IS_ERR(plat_dev))
		return NULL;

	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
		int err = device_create_file(&plat_dev->dev,
					     &niu_parent_attributes[i]);
		if (err)
			goto fail_unregister;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail_unregister;

	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
				      FLOW_KEY_IPDA |
				      FLOW_KEY_PROTO |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_0_SHIFT) |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_1_SHIFT));
	}

	for (i = 0; i < LDN_MAX + 1; i++)
		p->ldg_map[i] = LDG_INVALID;

	return p;

fail_unregister:
	platform_device_unregister(plat_dev);
	return NULL;
}
static struct niu_parent * __devinit niu_get_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id, ptype);

	if (p) {
		char port_name[6];
		int err;

		sprintf(port_name, "port%d", port);
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
					&np->device->kobj,
					port_name);
		if (!err) {
			p->ports[port] = np;
			atomic_inc(&p->refcnt);
		}
	}
	mutex_unlock(&niu_parent_lock);

	return p;
}
static void niu_put_parent(struct niu *np)
{
	struct niu_parent *p = np->parent;
	u8 port = np->port;
	char port_name[6];

	BUG_ON(!p || p->ports[port] != np);

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "%s() port[%u]\n", __func__, port);

	sprintf(port_name, "port%d", port);

	mutex_lock(&niu_parent_lock);

	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

	p->ports[port] = NULL;
	np->parent = NULL;

	if (atomic_dec_and_test(&p->refcnt)) {
		list_del(&p->list);
		platform_device_unregister(p->plat_dev);
	}

	mutex_unlock(&niu_parent_lock);
}
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
				    u64 *handle, gfp_t flag)
{
	dma_addr_t dh;
	void *ret;

	ret = dma_alloc_coherent(dev, size, &dh, flag);
	if (ret)
		*handle = dh;
	return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
				  void *cpu_addr, u64 handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
			       size_t size, enum dma_data_direction direction)
{
	dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
			      size_t size,
			      enum dma_data_direction direction)
{
	return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
				 size_t size,
				 enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
	.alloc_coherent	= niu_pci_alloc_coherent,
	.free_coherent	= niu_pci_free_coherent,
	.map_page	= niu_pci_map_page,
	.unmap_page	= niu_pci_unmap_page,
	.map_single	= niu_pci_map_single,
	.unmap_single	= niu_pci_unmap_single,
};
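/* niu_pci_ops routes DMA setup through the generic dma_map_*() API for
 * PCI (Atlas) cards; the sparc64-only niu_phys_ops further down hands
 * the device raw physical addresses instead, which is what the on-chip
 * N2/NIU variant expects.
 */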
static void __devinit niu_driver_version(void)
{
	static int niu_version_printed;

	if (niu_version_printed++ == 0)
		pr_info("%s", version);
}

static struct net_device * __devinit niu_alloc_and_init(
	struct device *gen_dev, struct pci_dev *pdev,
	struct platform_device *op, const struct niu_ops *ops,
	u8 port)
{
	struct net_device *dev;
	struct niu *np;

	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
	if (!dev) {
		dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, gen_dev);

	np = netdev_priv(dev);
	np->dev = dev;
	np->pdev = pdev;
	np->op = op;
	np->device = gen_dev;
	np->ops = ops;

	np->msg_enable = niu_debug;

	spin_lock_init(&np->lock);
	INIT_WORK(&np->reset_task, niu_reset_task);

	np->port = port;

	return dev;
}
static const struct net_device_ops niu_netdev_ops = {
	.ndo_open		= niu_open,
	.ndo_stop		= niu_close,
	.ndo_start_xmit		= niu_start_xmit,
	.ndo_get_stats		= niu_get_stats,
	.ndo_set_multicast_list	= niu_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= niu_set_mac_addr,
	.ndo_do_ioctl		= niu_ioctl,
	.ndo_tx_timeout		= niu_tx_timeout,
	.ndo_change_mtu		= niu_change_mtu,
};

static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &niu_netdev_ops;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
}
static void __devinit niu_device_announce(struct niu *np)
{
	struct net_device *dev = np->dev;

	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
			 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
			  "COPPER")),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
}
static void __devinit niu_set_basic_features(struct net_device *dev)
{
	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
			  NETIF_F_GRO | NETIF_F_RXHASH);
}
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	int err, pos;
	u64 dma_mask;
	u16 val16;

	niu_driver_version();

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
		goto err_out_free_res;
	}

	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.pci.domain = pci_domain_nr(pdev->bus);
	parent_id.pci.bus = pdev->bus->number;
	parent_id.pci.device = PCI_SLOT(pdev->devfn);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_ATLAS);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	val16 |= (PCI_EXP_DEVCTL_CERE |
		  PCI_EXP_DEVCTL_NFERE |
		  PCI_EXP_DEVCTL_FERE |
		  PCI_EXP_DEVCTL_URRE |
		  PCI_EXP_DEVCTL_RELAX_EN);
	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);

	dma_mask = DMA_BIT_MASK(44);
	err = pci_set_dma_mask(pdev, dma_mask);
	if (!err) {
		dev->features |= NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, dma_mask);
		if (err) {
			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
			goto err_out_release_parent;
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_parent;
		}
	}

	niu_set_basic_features(dev);

	np->regs = pci_ioremap_bar(pdev, 0);
	if (!np->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	dev->irq = pdev->irq;

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->regs) {
		iounmap(np->regs);
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}
static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->regs) {
			iounmap(np->regs);
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&np->reset_task);
	niu_netif_stop(np);

	del_timer_sync(&np->timer);

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_interrupts(np, 0);
	spin_unlock_irqrestore(&np->lock, flags);

	netif_device_detach(dev);

	spin_lock_irqsave(&np->lock, flags);
	niu_stop_hw(np);
	spin_unlock_irqrestore(&np->lock, flags);

	pci_save_state(pdev);

	return 0;
}
static int niu_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);

	netif_device_attach(dev);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}
static struct pci_driver niu_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= niu_pci_tbl,
	.probe		= niu_pci_init_one,
	.remove		= __devexit_p(niu_pci_remove_one),
	.suspend	= niu_suspend,
	.resume		= niu_resume,
};

#ifdef CONFIG_SPARC64
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
				     u64 *dma_addr, gfp_t flag)
{
	unsigned long order = get_order(size);
	unsigned long page = __get_free_pages(flag, order);

	if (page == 0UL)
		return NULL;
	memset((char *)page, 0, PAGE_SIZE << order);
	*dma_addr = __pa(page);

	return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
				   void *cpu_addr, u64 handle)
{
	unsigned long order = get_order(size);

	free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
				size_t size, enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
			       size_t size,
			       enum dma_data_direction direction)
{
	return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static const struct niu_ops niu_phys_ops = {
	.alloc_coherent	= niu_phys_alloc_coherent,
	.free_coherent	= niu_phys_free_coherent,
	.map_page	= niu_phys_map_page,
	.unmap_page	= niu_phys_unmap_page,
	.map_single	= niu_phys_map_single,
	.unmap_single	= niu_phys_unmap_single,
};

static int __devinit niu_of_probe(struct platform_device *op)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	const u32 *reg;
	int err;

	niu_driver_version();

	reg = of_get_property(op->dev.of_node, "reg", NULL);
	if (!reg) {
		dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
			op->dev.of_node->full_name);
		err = -ENODEV;
		goto err_out;
	}

	dev = niu_alloc_and_init(&op->dev, NULL, op,
				 &niu_phys_ops, reg[0] & 0x1);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.of = of_get_parent(op->dev.of_node);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_NIU);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	niu_set_basic_features(dev);

	np->regs = of_ioremap(&op->resource[1], 0,
			      resource_size(&op->resource[1]),
			      "niu regs");
	if (!np->regs) {
		dev_err(&op->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
				    resource_size(&op->resource[2]),
				    "niu vregs-1");
	if (!np->vir_regs_1) {
		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
				    resource_size(&op->resource[3]),
				    "niu vregs-2");
	if (!np->vir_regs_2) {
		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&op->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	dev_set_drvdata(&op->dev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->vir_regs_1) {
		of_iounmap(&op->resource[2], np->vir_regs_1,
			   resource_size(&op->resource[2]));
		np->vir_regs_1 = NULL;
	}

	if (np->vir_regs_2) {
		of_iounmap(&op->resource[3], np->vir_regs_2,
			   resource_size(&op->resource[3]));
		np->vir_regs_2 = NULL;
	}

	if (np->regs) {
		of_iounmap(&op->resource[1], np->regs,
			   resource_size(&op->resource[1]));
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}
niu_of_remove(struct platform_device
*op
)
10181 struct net_device
*dev
= dev_get_drvdata(&op
->dev
);
10184 struct niu
*np
= netdev_priv(dev
);
10186 unregister_netdev(dev
);
10188 if (np
->vir_regs_1
) {
10189 of_iounmap(&op
->resource
[2], np
->vir_regs_1
,
10190 resource_size(&op
->resource
[2]));
10191 np
->vir_regs_1
= NULL
;
10194 if (np
->vir_regs_2
) {
10195 of_iounmap(&op
->resource
[3], np
->vir_regs_2
,
10196 resource_size(&op
->resource
[3]));
10197 np
->vir_regs_2
= NULL
;
10201 of_iounmap(&op
->resource
[1], np
->regs
,
10202 resource_size(&op
->resource
[1]));
10208 niu_put_parent(np
);
10211 dev_set_drvdata(&op
->dev
, NULL
);
static const struct of_device_id niu_match[] = {
	{
		.name = "network",
		.compatible = "SUNW,niusl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct platform_driver niu_of_driver = {
	.driver = {
		.name = "niu",
		.owner = THIS_MODULE,
		.of_match_table = niu_match,
	},
	.probe		= niu_of_probe,
	.remove		= __devexit_p(niu_of_remove),
};

#endif /* CONFIG_SPARC64 */
static int __init niu_init(void)
{
	int err = 0;

	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = platform_driver_register(&niu_of_driver);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			platform_driver_unregister(&niu_of_driver);
#endif
	}

	return err;
}

static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	platform_driver_unregister(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);