/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>

#include <linux/of_device.h>

#define DRV_MODULE_NAME		"niu"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"0.9"
#define DRV_MODULE_RELDATE	"May 4, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef DMA_44BIT_MASK
#define DMA_44BIT_MASK	0x00000fffffffffffULL
#endif
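/* The NIU register space is 64 bits wide but is accessed here with 32-bit
 * MMIO primitives: readq()/writeq() below compose each 64-bit access from
 * two readl()/writel() operations (low word first, high word at +4).  Note
 * that the two halves are not atomic with respect to one another.
 */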
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}

static struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");
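/* The niudbg/niuinfo/niuwarn macros below gate their printk on the
 * corresponding NETIF_MSG_* bit in np->msg_enable (typically seeded from
 * the "debug" module parameter above and NIU_MSG_DEFAULT).
 */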
#define niudbg(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_DEBUG PFX f, ## a); \
} while (0)

#define niuinfo(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_INFO PFX f, ## a); \
} while (0)

#define niuwarn(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_WARNING PFX f, ## a); \
} while (0)

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	dev_err(np->device, PFX "%s: bits (%llx) of register %s "
		"would not clear, val[%llx]\n",
		np->dev->name, (unsigned long long) bits, reg_name,
		(unsigned long long) nr64_mac(reg));
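/* The niu_set_and_wait_clear*() wrappers below use BUILD_BUG_ON() so that
 * obviously invalid LIMIT/DELAY arguments (a non-positive limit or a
 * negative delay) are rejected at compile time before the __ helpers run.
 */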
#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	dev_err(np->device, PFX "%s: bits (%llx) of register %s "
		"would not clear, val[%llx]\n",
		np->dev->name, (unsigned long long) bits, reg_name,
		(unsigned long long) nr64_ipp(reg));

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
	while (--limit >= 0) {

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	dev_err(np->device, PFX "%s: bits (%llx) of register %s "
		"would not clear, val[%llx]\n",
		np->dev->name, (unsigned long long) bits, reg_name,
		(unsigned long long) nr64(reg));

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
	u64 val = (u64) lp->timer;
	val |= LDG_IMGMT_ARM;
	nw64(LDG_IMGMT(lp->ldg_num), val);

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
	unsigned long mask_reg, bits;
	if (ldn < 0 || ldn > LDN_MAX)
		mask_reg = LD_IM0(ldn);
		mask_reg = LD_IM1(ldn - 64);
	val = nr64(mask_reg);

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
	struct niu_parent *parent = np->parent;
	for (i = 0; i <= LDN_MAX; i++) {
		if (parent->ldg_map[i] != lp->ldg_num)
		err = niu_ldn_irq_enable(np, i, on);

static int niu_enable_interrupts(struct niu *np, int on)
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		err = niu_enable_ldn_in_ldg(np, lp, on);
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

static u32 phy_encode(u32 type, int port)
{
	return (type << (port * 2));
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
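/* The parent's port_phy word packs a 2-bit port type per port: phy_encode()
 * shifts a type value into the slot for "port", and phy_decode() extracts
 * it again with PORT_TYPE_MASK.
 */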
static int mdio_wait(struct niu *np)
	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

static int mdio_read(struct niu *np, int port, int dev, int reg)
	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));

static int mii_read(struct niu *np, int port, int reg)
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);

static int mii_write(struct niu *np, int port, int reg, int data)
	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));

static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_H(channel),

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_H(channel),

/* Mode is always 10G fiber.  */
static int serdes_init_niu(struct niu *np)
	struct niu_link_config *lp = &np->link_config;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	*val = (err & 0xffff);
	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_CTRL_H(chan));
	*val |= ((err & 0xffff) << 16);

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	*val = (err & 0xffff);
	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_H(chan));
	*val |= ((err & 0xffff) << 16);

static int esr_read_reset(struct niu *np, u32 *val)
	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	*val = (err & 0xffff);
	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_H);
	*val |= ((err & 0xffff) << 16);

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_H(chan), (val >> 16));

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_H(chan), (val >> 16));

static int esr_reset(struct niu *np)
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	err = esr_read_reset(np, &reset);
		dev_err(np->device, PFX "Port %u ESR_RESET "
			"did not clear [%08x]\n",
static int serdes_init_10g(struct niu *np)
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;

		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;

	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		err = esr_read_glue0(np, i, &glue0);

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		err = esr_write_glue0(np, i, glue0);

	sig = nr64(ESR_INT_SIGNALS);
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;

static int serdes_init_1g(struct niu *np)
	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
		val |= ENET_SERDES_PLL_HRATE0;
		val |= ENET_SERDES_PLL_HRATE1;
		val |= ENET_SERDES_PLL_HRATE2;
		val |= ENET_SERDES_PLL_HRATE3;
	nw64(ENET_SERDES_1_PLL_CFG, val);

static int serdes_init_1g_serdes(struct niu *np)
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;

	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));

	nw64(ENET_SERDES_RESET, reset_val);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		err = esr_read_glue0(np, i, &glue0);

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		err = esr_write_glue0(np, i, glue0);

	sig = nr64(ESR_INT_SIGNALS);
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
static int link_status_1g_serdes(struct niu *np, int *link_up_p)
	struct niu_link_config *lp = &np->link_config;

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
	struct niu_link_config *lp = &np->link_config;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)

	if ((val & 0x1000ULL) && link_ok) {
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = mii_read(np, np->phy_addr, MII_BMSR);

	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		err = mii_read(np, np->phy_addr, MII_LPA);
		err = mii_read(np, np->phy_addr, MII_ESTATUS);

		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;

	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;

static int bcm8704_reset(struct niu *np)
	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (!(err & BMCR_RESET))
		dev_err(np->device, PFX "Port %u PHY will not reset "
			"(bmcr=%04x)\n", np->port, (err & 0xffff));
/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);

static int bcm8706_init_user_dev3(struct niu *np)
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);

static int bcm8704_init_user_dev3(struct niu *np)
	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);

static int mrvl88x2011_act_led(struct niu *np, int val)
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	err |= MRVL88X2011_ENA_XFPREFCLK;
	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;
	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
static int xcvr_diag_bcm870x(struct niu *np)
	u16 analog_stat0, tx_alarm_status;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
	pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
	pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info(PFX "Port %u cable not connected "
				"or bad cable.\n", np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info(PFX "Port %u optical module is bad "
				"or missing.\n", np->port);

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
	struct niu_link_config *lp = &np->link_config;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,

static int xcvr_init_10g_bcm8706(struct niu *np)
	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	err = xcvr_10g_set_lb_bcm870x(np);
	err = bcm8706_init_user_dev3(np);
	err = xcvr_diag_bcm870x(np);

static int xcvr_init_10g_bcm8704(struct niu *np)
	err = bcm8704_reset(np);
	err = bcm8704_init_user_dev3(np);
	err = xcvr_10g_set_lb_bcm870x(np);
	err = xcvr_diag_bcm870x(np);

static int xcvr_init_10g(struct niu *np)
	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
static int mii_reset(struct niu *np)
	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	while (--limit >= 0) {
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (!(err & BMCR_RESET))
		dev_err(np->device, PFX "Port %u MII would not reset, "
			"bmcr[%04x]\n", np->port, err);

static int xcvr_init_1g_rgmii(struct niu *np)
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);

	err = mii_read(np, np->phy_addr, MII_BMSR);

	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);

	if (bmsr & BMSR_ESTATEN) {
		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);

	err = mii_read(np, np->phy_addr, MII_BMCR);
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);

static int mii_init_common(struct niu *np)
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;

	err = mii_reset(np);

	err = mii_read(np, np->phy_addr, MII_BMSR);

	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);

	/* XXX configurable XXX */
	/* XXX for now don't advertise half-duplex or asym pause... XXX */
	adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
	if (bmsr & BMSR_10FULL)
		adv |= ADVERTISE_10FULL;
	if (bmsr & BMSR_100FULL)
		adv |= ADVERTISE_100FULL;
	err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);

	if (bmsr & BMSR_ESTATEN) {
		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);

	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);

	err = mii_read(np, np->phy_addr, MII_BMCR);
	err = mii_read(np, np->phy_addr, MII_BMSR);

	pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);

static int xcvr_init_1g(struct niu *np)
	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
static int niu_xcvr_init(struct niu *np)
	const struct niu_phy_ops *ops = np->phy_ops;
	err = ops->xcvr_init(np);

static int niu_serdes_init(struct niu *np)
	const struct niu_phy_ops *ops = np->phy_ops;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

static int niu_link_status_common(struct niu *np, int link_up)
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
			(lp->active_speed == SPEED_10000 ?
			 (lp->active_speed == SPEED_1000 ?
			  (lp->active_speed == SPEED_100 ?
			   "100Mbit/sec" : "10Mbit/sec"))),
			(lp->active_duplex == DUPLEX_FULL ?

		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		niuwarn(LINK, "%s: Link is down\n", dev->name);
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
	int err, link_up, pma_status, pcs_status;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		link_up = (pma_status && pcs_status) ? 1 : 0;
		np->link_config.active_speed = SPEED_10000;
		np->link_config.active_duplex = DUPLEX_FULL;

	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;

static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;

	*link_up_p = link_up;
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)

static int link_status_10g_bcom(struct niu *np, int *link_up_p)
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;

	*link_up_p = link_up;

static int link_status_10g(struct niu *np, int *link_up_p)
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);

	spin_unlock_irqrestore(&np->lock, flags);
static int niu_10g_phy_present(struct niu *np)
	sig = nr64(ESR_INT_SIGNALS);
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);

	if ((sig & mask) != val)

static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
	unsigned long flags;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				niuwarn(LINK, "%s: Hotplug PHY Removed\n",

		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
			err = link_status_10g_bcm8706(np, link_up_p);

	spin_unlock_irqrestore(&np->lock, flags);
static int link_status_1g(struct niu *np, int *link_up_p)
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)

	err = mii_read(np, np->phy_addr, MII_BMSR);

	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		err = mii_read(np, np->phy_addr, MII_LPA);
		err = mii_read(np, np->phy_addr, MII_ESTATUS);

		if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
			current_speed = SPEED_1000;
			if (estat & ESTATUS_1000_TFULL)
				current_duplex = DUPLEX_FULL;
				current_duplex = DUPLEX_HALF;
			if (common & ADVERTISE_100BASE4) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_100FULL) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_100HALF) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_10FULL) {
				current_speed = SPEED_10;
				current_duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_10HALF) {
				current_speed = SPEED_10;
				current_duplex = DUPLEX_HALF;

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;

	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;

static int niu_link_status(struct niu *np, int *link_up_p)
	const struct niu_phy_ops *ops = np->phy_ops;
	if (ops->link_status)
		err = ops->link_status(np, link_up_p);

static void niu_timer(unsigned long __opaque)
	struct niu *np = (struct niu *) __opaque;

	err = niu_link_status(np, &link_up);
		niu_link_status_common(np, link_up);
	if (netif_carrier_ok(np->dev))

	np->timer.expires = jiffies + off;

	add_timer(&np->timer);
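/* Each niu_phy_ops structure below bundles the serdes_init/xcvr_init/
 * link_status hooks for one PHY or SERDES arrangement.  The niu_phy_template
 * wrappers pair an ops table with a PHY address base, and
 * niu_determine_phy_disposition() later selects one of them and records the
 * result in np->phy_ops and np->phy_addr.
 */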
static const struct niu_phy_ops phy_ops_10g_serdes = {
	.serdes_init		= serdes_init_10g_serdes,
	.link_status		= link_status_10g_serdes,

static const struct niu_phy_ops phy_ops_1g_rgmii = {
	.xcvr_init		= xcvr_init_1g_rgmii,
	.link_status		= link_status_1g_rgmii,

static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
	.serdes_init		= serdes_init_niu,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,

static const struct niu_phy_ops phy_ops_10g_fiber = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,

static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,

static const struct niu_phy_ops phy_ops_10g_copper = {
	.serdes_init		= serdes_init_10g,
	.link_status		= link_status_10g, /* XXX */

static const struct niu_phy_ops phy_ops_1g_fiber = {
	.serdes_init		= serdes_init_1g,
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,

static const struct niu_phy_ops phy_ops_1g_copper = {
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,

struct niu_phy_template {
	const struct niu_phy_ops	*ops;

static const struct niu_phy_template phy_template_niu = {
	.ops		= &phy_ops_10g_fiber_niu,
	.phy_addr_base	= 16,

static const struct niu_phy_template phy_template_10g_fiber = {
	.ops		= &phy_ops_10g_fiber,

static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
	.ops		= &phy_ops_10g_fiber_hotplug,

static const struct niu_phy_template phy_template_10g_copper = {
	.ops		= &phy_ops_10g_copper,
	.phy_addr_base	= 10,

static const struct niu_phy_template phy_template_1g_fiber = {
	.ops		= &phy_ops_1g_fiber,

static const struct niu_phy_template phy_template_1g_copper = {
	.ops		= &phy_ops_1g_copper,

static const struct niu_phy_template phy_template_1g_rgmii = {
	.ops		= &phy_ops_1g_rgmii,

static const struct niu_phy_template phy_template_10g_serdes = {
	.ops		= &phy_ops_10g_serdes,

static int niu_atca_port_num[4] = {
static int serdes_init_10g_serdes(struct niu *np)
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;

		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;

	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));

	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		err = esr_read_glue0(np, i, &glue0);

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		err = esr_write_glue0(np, i, glue0);

	sig = nr64(ESR_INT_SIGNALS);
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);

	if ((sig & mask) != val) {
		err = serdes_init_1g_serdes(np);
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
			dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed\n",
static int niu_determine_phy_disposition(struct niu *np)
	struct niu_parent *parent = np->parent;
	u8 plat_type = parent->plat_type;
	const struct niu_phy_template *tp;
	u32 phy_addr_off = 0;

	if (plat_type == PLAT_TYPE_NIU) {
		tp = &phy_template_niu;
		phy_addr_off += np->port;
			 NIU_FLAGS_XCVR_SERDES)) {
			tp = &phy_template_1g_copper;
			if (plat_type == PLAT_TYPE_VF_P0)
			else if (plat_type == PLAT_TYPE_VF_P1)
			phy_addr_off += (np->port ^ 0x3);
			tp = &phy_template_1g_copper;

		case NIU_FLAGS_FIBER:
			tp = &phy_template_1g_fiber;

		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
			tp = &phy_template_10g_fiber;
			if (plat_type == PLAT_TYPE_VF_P0 ||
			    plat_type == PLAT_TYPE_VF_P1)
			phy_addr_off += np->port;
			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
				tp = &phy_template_10g_fiber_hotplug;

		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		case NIU_FLAGS_XCVR_SERDES:
				tp = &phy_template_10g_serdes;
				tp = &phy_template_1g_rgmii;
			phy_addr_off = niu_atca_port_num[np->port];

	np->phy_ops = tp->ops;
	np->phy_addr = tp->phy_addr_base + phy_addr_off;

static int niu_init_link(struct niu *np)
	struct niu_parent *parent = np->parent;

	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_xcvr_init(np);
	err = niu_serdes_init(np);
	err = niu_xcvr_init(np);
		niu_link_status(np, &ignore);
static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ADDR0, reg0);
		nw64_mac(XMAC_ADDR1, reg1);
		nw64_mac(XMAC_ADDR2, reg2);
		nw64_mac(BMAC_ADDR0, reg0);
		nw64_mac(BMAC_ADDR1, reg1);
		nw64_mac(BMAC_ADDR2, reg2);

static int niu_num_alt_addr(struct niu *np)
	if (np->flags & NIU_FLAGS_XMAC)
		return XMAC_NUM_ALT_ADDR;
	return BMAC_NUM_ALT_ADDR;

static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (index >= niu_num_alt_addr(np))

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
		nw64_mac(BMAC_ALT_ADDR2(index), reg2);

static int niu_enable_alt_mac(struct niu *np, int index, int on)
	if (index >= niu_num_alt_addr(np))

	if (np->flags & NIU_FLAGS_XMAC) {
		reg = XMAC_ADDR_CMPEN;
		reg = BMAC_ADDR_CMPEN;
		mask = 1 << (index + 1);

	val = nr64_mac(reg);

static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
				   int num, int mac_pref)
	u64 val = nr64_mac(reg);
	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
		val |= HOST_INFO_MPR;

static int __set_rdc_table_num(struct niu *np,
			       int xmac_index, int bmac_index,
			       int rdc_table_num, int mac_pref)
	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
	if (np->flags & NIU_FLAGS_XMAC)
		reg = XMAC_HOST_INFO(xmac_index);
		reg = BMAC_HOST_INFO(bmac_index);
	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);

static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);

static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);

static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
				     int table_num, int mac_pref)
	if (idx >= niu_num_alt_addr(np))
	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
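/* VLAN table entries carry two parity bits.  vlan_entry_set_parity() below
 * recomputes them with hweight64(): ENET_VLAN_TBL_PARITY0 tracks the parity
 * of the port 0/1 half of the entry and ENET_VLAN_TBL_PARITY1 the port 2/3
 * half, and vlan_tbl_write() reapplies this after every update.
 */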
static u64 vlan_entry_set_parity(u64 reg_val)
	port01_mask = 0x00ff;
	port23_mask = 0xff00;

	if (hweight64(reg_val & port01_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY0;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY0;

	if (hweight64(reg_val & port23_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY1;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY1;

static void vlan_tbl_write(struct niu *np, unsigned long index,
			   int port, int vpr, int rdc_table)
	u64 reg_val = nr64(ENET_VLAN_TBL(index));

	reg_val &= ~((ENET_VLAN_TBL_VPR |
		      ENET_VLAN_TBL_VLANRDCTBLN) <<
		     ENET_VLAN_TBL_SHIFT(port));
		reg_val |= (ENET_VLAN_TBL_VPR <<
			    ENET_VLAN_TBL_SHIFT(port));
	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));

	reg_val = vlan_entry_set_parity(reg_val);

	nw64(ENET_VLAN_TBL(index), reg_val);

static void vlan_tbl_clear(struct niu *np)
	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
		nw64(ENET_VLAN_TBL(i), 0);
static int tcam_wait_bit(struct niu *np, u64 bit)
	while (--limit > 0) {
		if (nr64(TCAM_CTL) & bit)

static int tcam_flush(struct niu *np, int index)
	nw64(TCAM_KEY_0, 0x00);
	nw64(TCAM_KEY_MASK_0, 0xff);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);

static int tcam_read(struct niu *np, int index,
		     u64 *key, u64 *mask)
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
		key[0] = nr64(TCAM_KEY_0);
		key[1] = nr64(TCAM_KEY_1);
		key[2] = nr64(TCAM_KEY_2);
		key[3] = nr64(TCAM_KEY_3);
		mask[0] = nr64(TCAM_KEY_MASK_0);
		mask[1] = nr64(TCAM_KEY_MASK_1);
		mask[2] = nr64(TCAM_KEY_MASK_2);
		mask[3] = nr64(TCAM_KEY_MASK_3);

static int tcam_write(struct niu *np, int index,
		      u64 *key, u64 *mask)
	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);

static int tcam_assoc_read(struct niu *np, int index, u64 *data)
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
		*data = nr64(TCAM_KEY_1);

static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
	nw64(TCAM_KEY_1, assoc_data);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
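/* The TCAM accessors above share one protocol: stage the key/mask (or the
 * associated-data word) in the TCAM_KEY_* registers, kick the operation via
 * TCAM_CTL with the desired RWC opcode and entry index, then poll
 * TCAM_CTL_STAT through tcam_wait_bit() until the hardware reports
 * completion.
 */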
static void tcam_enable(struct niu *np, int on)
	u64 val = nr64(FFLP_CFG_1);
		val &= ~FFLP_CFG_1_TCAM_DIS;
		val |= FFLP_CFG_1_TCAM_DIS;
	nw64(FFLP_CFG_1, val);

static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
	u64 val = nr64(FFLP_CFG_1);

	val &= ~(FFLP_CFG_1_FFLPINITDONE |
		 FFLP_CFG_1_CAMRATIO);
	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2)

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);

static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2 ||
	    (ether_type & ~(u64)0xffff) != 0)

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
	val &= ~L2_CLS_ETYPE;
	val |= (ether_type << L2_CLS_ETYPE_SHIFT);

static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_USER_PROG4)

	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
		val |= L3_CLS_VALID;
		val &= ~L3_CLS_VALID;

static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
				  int ipv6, u64 protocol_id,
				  u64 tos_mask, u64 tos_val)
	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_USER_PROG4 ||
	    (protocol_id & ~(u64)0xff) != 0 ||
	    (tos_mask & ~(u64)0xff) != 0 ||
	    (tos_val & ~(u64)0xff) != 0)

	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
		 L3_CLS_TOSMASK | L3_CLS_TOS);
		val |= L3_CLS_IPVER;
	val |= (protocol_id << L3_CLS_PID_SHIFT);
	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
	val |= (tos_val << L3_CLS_TOS_SHIFT);
*np
)
2658 tcam_set_lat_and_ratio(np
,
2659 DEFAULT_TCAM_LATENCY
,
2660 DEFAULT_TCAM_ACCESS_RATIO
);
2661 for (i
= CLASS_CODE_ETHERTYPE1
; i
<= CLASS_CODE_ETHERTYPE2
; i
++) {
2662 err
= tcam_user_eth_class_enable(np
, i
, 0);
2666 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_USER_PROG4
; i
++) {
2667 err
= tcam_user_ip_class_enable(np
, i
, 0);
2675 static int tcam_flush_all(struct niu
*np
)
2679 for (i
= 0; i
< np
->parent
->tcam_num_entries
; i
++) {
2680 int err
= tcam_flush(np
, i
);
2687 static u64
hash_addr_regval(unsigned long index
, unsigned long num_entries
)
2689 return ((u64
)index
| (num_entries
== 1 ?
2690 HASH_TBL_ADDR_AUTOINC
: 0));
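/* hash_read()/hash_write() below program HASH_TBL_ADDR(partition) once with
 * the value built by hash_addr_regval() and then stream num_entries words
 * through HASH_TBL_DATA(partition), after bounds-checking the partition
 * number and index range against FCRAM_SIZE.
 */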
static int hash_read(struct niu *np, unsigned long partition,
		     unsigned long index, unsigned long num_entries,
	u64 val = hash_addr_regval(index, num_entries);

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + num_entries > FCRAM_SIZE)

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		data[i] = nr64(HASH_TBL_DATA(partition));

static int hash_write(struct niu *np, unsigned long partition,
		      unsigned long index, unsigned long num_entries,
	u64 val = hash_addr_regval(index, num_entries);

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + (num_entries * 8) > FCRAM_SIZE)

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		nw64(HASH_TBL_DATA(partition), data[i]);

static void fflp_reset(struct niu *np)
	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
	nw64(FFLP_CFG_1, 0);

	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

static void fflp_set_timings(struct niu *np)
	u64 val = nr64(FFLP_CFG_1);

	val &= ~FFLP_CFG_1_FFLPINITDONE;
	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

	val = nr64(FCRAM_REF_TMR);
	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
	nw64(FCRAM_REF_TMR, val);
static int fflp_set_partition(struct niu *np, u64 partition,
			      u64 mask, u64 base, int enable)
	if (partition >= FCRAM_NUM_PARTITIONS ||
	    (mask & ~(u64)0x1f) != 0 ||
	    (base & ~(u64)0x1f) != 0)

	reg = FLW_PRT_SEL(partition);

	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
		val |= FLW_PRT_SEL_EXT;

static int fflp_disable_all_partitions(struct niu *np)
	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
		int err = fflp_set_partition(np, 0, 0, 0, 0);

static void fflp_llcsnap_enable(struct niu *np, int on)
	u64 val = nr64(FFLP_CFG_1);
		val |= FFLP_CFG_1_LLCSNAP;
		val &= ~FFLP_CFG_1_LLCSNAP;
	nw64(FFLP_CFG_1, val);

static void fflp_errors_enable(struct niu *np, int on)
	u64 val = nr64(FFLP_CFG_1);
		val &= ~FFLP_CFG_1_ERRORDIS;
		val |= FFLP_CFG_1_ERRORDIS;
	nw64(FFLP_CFG_1, val);

static int fflp_hash_clear(struct niu *np)
	struct fcram_hash_ipv4 ent;

	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
	memset(&ent, 0, sizeof(ent));
	ent.header = HASH_HEADER_EXT;

	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
		int err = hash_write(np, 0, i, 1, (u64 *) &ent);

static int fflp_early_init(struct niu *np)
	struct niu_parent *parent;
	unsigned long flags;

	niu_lock_parent(np, flags);

	parent = np->parent;
	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
		niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			fflp_set_timings(np);
			err = fflp_disable_all_partitions(np);
				niudbg(PROBE, "fflp_disable_all_partitions "
				       "failed, err=%d\n", err);

		err = tcam_early_init(np);
			niudbg(PROBE, "tcam_early_init failed, err=%d\n",
		fflp_llcsnap_enable(np, 1);
		fflp_errors_enable(np, 0);

		err = tcam_flush_all(np);
			niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = fflp_hash_clear(np);
				niudbg(PROBE, "fflp_hash_clear failed, "

		niudbg(PROBE, "fflp_early_init: Success\n");
		parent->flags |= PARENT_FLGS_CLS_HWINIT;

	niu_unlock_parent(np, flags);
static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)

	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);

static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)

	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);

static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
			      u32 offset, u32 size)
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page_offset = offset;

	skb->data_len += size;
	skb->truesize += size;

	skb_shinfo(skb)->nr_frags = i + 1;
*rp
, u64 a
)
2937 a
^= (a
>> ilog2(MAX_RBR_RING_SIZE
));
2939 return (a
& (MAX_RBR_RING_SIZE
- 1));
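/* Worked example (illustrative, not from the original source): with 4KB
 * pages and an assumed MAX_RBR_RING_SIZE of 512, a buffer DMA address is
 * reduced to its page frame number, folded down by a 9-bit shift-and-xor,
 * and masked to 0..511 to pick the rxhash[] bucket tracking the owning page.
 */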
static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
				    struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			break;
		}
	}

	return p;
}
static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
	unsigned int h = niu_hash_rxaddr(rp, base);

	page->index = base;
	page->mapping = (struct address_space *) rp->rxhash[h];
	rp->rxhash[h] = page;
}
static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
			    gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);

	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		atomic_add(rp->rbr_blocks_per_page - 1,
			   &compound_head(page)->_count);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}

	return 0;
}
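/* Illustrative note (not part of the original source): each allocated page
 * is carved into rp->rbr_blocks_per_page receive buffers.  With 8KB pages
 * and a 4KB rbr_block_size, one alloc_page() call fills two consecutive RBR
 * descriptors, and the page refcount is bumped by blocks-per-page - 1 so
 * the page is only released after every block on it has been consumed.
 */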
static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int index = rp->rbr_index;

	rp->rbr_pending++;
	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
		int err = niu_rbr_add_page(np, rp, mask, index);

		if (unlikely(err)) {
			rp->rbr_pending--;
			return;
		}

		rp->rbr_index += rp->rbr_blocks_per_page;
		BUG_ON(rp->rbr_index > rp->rbr_table_size);
		if (rp->rbr_index == rp->rbr_table_size)
			rp->rbr_index = 0;

		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
			rp->rbr_pending = 0;
		}
	}
}
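/* Illustrative note (not part of the original source): refills are batched.
 * The RBR_KICK write tells the channel how many freshly posted buffers it
 * may now use; deferring it until rbr_kick_thresh buffers have accumulated
 * keeps the number of MMIO writes per received packet low.
 */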
3023 static int niu_rx_pkt_ignore(struct niu
*np
, struct rx_ring_info
*rp
)
3025 unsigned int index
= rp
->rcr_index
;
3030 struct page
*page
, **link
;
3036 val
= le64_to_cpup(&rp
->rcr
[index
]);
3037 addr
= (val
& RCR_ENTRY_PKT_BUF_ADDR
) <<
3038 RCR_ENTRY_PKT_BUF_ADDR_SHIFT
;
3039 page
= niu_find_rxpage(rp
, addr
, &link
);
3041 rcr_size
= rp
->rbr_sizes
[(val
& RCR_ENTRY_PKTBUFSZ
) >>
3042 RCR_ENTRY_PKTBUFSZ_SHIFT
];
3043 if ((page
->index
+ PAGE_SIZE
) - rcr_size
== addr
) {
3044 *link
= (struct page
*) page
->mapping
;
3045 np
->ops
->unmap_page(np
->device
, page
->index
,
3046 PAGE_SIZE
, DMA_FROM_DEVICE
);
3048 page
->mapping
= NULL
;
3050 rp
->rbr_refill_pending
++;
3053 index
= NEXT_RCR(rp
, index
);
3054 if (!(val
& RCR_ENTRY_MULTI
))
3058 rp
->rcr_index
= index
;
3063 static int niu_process_rx_pkt(struct niu
*np
, struct rx_ring_info
*rp
)
3065 unsigned int index
= rp
->rcr_index
;
3066 struct sk_buff
*skb
;
3069 skb
= netdev_alloc_skb(np
->dev
, RX_SKB_ALLOC_SIZE
);
3071 return niu_rx_pkt_ignore(np
, rp
);
3075 struct page
*page
, **link
;
3076 u32 rcr_size
, append_size
;
3081 val
= le64_to_cpup(&rp
->rcr
[index
]);
3083 len
= (val
& RCR_ENTRY_L2_LEN
) >>
3084 RCR_ENTRY_L2_LEN_SHIFT
;
3087 addr
= (val
& RCR_ENTRY_PKT_BUF_ADDR
) <<
3088 RCR_ENTRY_PKT_BUF_ADDR_SHIFT
;
3089 page
= niu_find_rxpage(rp
, addr
, &link
);
3091 rcr_size
= rp
->rbr_sizes
[(val
& RCR_ENTRY_PKTBUFSZ
) >>
3092 RCR_ENTRY_PKTBUFSZ_SHIFT
];
3094 off
= addr
& ~PAGE_MASK
;
3095 append_size
= rcr_size
;
3102 ptype
= (val
>> RCR_ENTRY_PKT_TYPE_SHIFT
);
3103 if ((ptype
== RCR_PKT_TYPE_TCP
||
3104 ptype
== RCR_PKT_TYPE_UDP
) &&
3105 !(val
& (RCR_ENTRY_NOPORT
|
3107 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3109 skb
->ip_summed
= CHECKSUM_NONE
;
3111 if (!(val
& RCR_ENTRY_MULTI
))
3112 append_size
= len
- skb
->len
;
3114 niu_rx_skb_append(skb
, page
, off
, append_size
);
3115 if ((page
->index
+ rp
->rbr_block_size
) - rcr_size
== addr
) {
3116 *link
= (struct page
*) page
->mapping
;
3117 np
->ops
->unmap_page(np
->device
, page
->index
,
3118 PAGE_SIZE
, DMA_FROM_DEVICE
);
3120 page
->mapping
= NULL
;
3121 rp
->rbr_refill_pending
++;
3125 index
= NEXT_RCR(rp
, index
);
3126 if (!(val
& RCR_ENTRY_MULTI
))
3130 rp
->rcr_index
= index
;
3132 skb_reserve(skb
, NET_IP_ALIGN
);
3133 __pskb_pull_tail(skb
, min(len
, NIU_RXPULL_MAX
));
3136 rp
->rx_bytes
+= skb
->len
;
3138 skb
->protocol
= eth_type_trans(skb
, np
->dev
);
3139 netif_receive_skb(skb
);
3141 np
->dev
->last_rx
= jiffies
;
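/* Illustrative note (not part of the original source): each 64-bit RCR entry
 * above encodes the L2 length (RCR_ENTRY_L2_LEN), the buffer size class
 * (RCR_ENTRY_PKTBUFSZ), the buffer DMA address (RCR_ENTRY_PKT_BUF_ADDR) and
 * a MULTI bit.  A frame larger than one buffer spans several entries, so the
 * receive loop keeps appending page fragments to the skb until it sees an
 * entry with RCR_ENTRY_MULTI clear.
 */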
3146 static int niu_rbr_fill(struct niu
*np
, struct rx_ring_info
*rp
, gfp_t mask
)
3148 int blocks_per_page
= rp
->rbr_blocks_per_page
;
3149 int err
, index
= rp
->rbr_index
;
3152 while (index
< (rp
->rbr_table_size
- blocks_per_page
)) {
3153 err
= niu_rbr_add_page(np
, rp
, mask
, index
);
3157 index
+= blocks_per_page
;
3160 rp
->rbr_index
= index
;
3164 static void niu_rbr_free(struct niu
*np
, struct rx_ring_info
*rp
)
3168 for (i
= 0; i
< MAX_RBR_RING_SIZE
; i
++) {
3171 page
= rp
->rxhash
[i
];
3173 struct page
*next
= (struct page
*) page
->mapping
;
3174 u64 base
= page
->index
;
3176 np
->ops
->unmap_page(np
->device
, base
, PAGE_SIZE
,
3179 page
->mapping
= NULL
;
3187 for (i
= 0; i
< rp
->rbr_table_size
; i
++)
3188 rp
->rbr
[i
] = cpu_to_le32(0);
3192 static int release_tx_packet(struct niu
*np
, struct tx_ring_info
*rp
, int idx
)
3194 struct tx_buff_info
*tb
= &rp
->tx_buffs
[idx
];
3195 struct sk_buff
*skb
= tb
->skb
;
3196 struct tx_pkt_hdr
*tp
;
3200 tp
= (struct tx_pkt_hdr
*) skb
->data
;
3201 tx_flags
= le64_to_cpup(&tp
->flags
);
3204 rp
->tx_bytes
+= (((tx_flags
& TXHDR_LEN
) >> TXHDR_LEN_SHIFT
) -
3205 ((tx_flags
& TXHDR_PAD
) / 2));
3207 len
= skb_headlen(skb
);
3208 np
->ops
->unmap_single(np
->device
, tb
->mapping
,
3209 len
, DMA_TO_DEVICE
);
3211 if (le64_to_cpu(rp
->descr
[idx
]) & TX_DESC_MARK
)
3216 idx
= NEXT_TX(rp
, idx
);
3217 len
-= MAX_TX_DESC_LEN
;
3220 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3221 tb
= &rp
->tx_buffs
[idx
];
3222 BUG_ON(tb
->skb
!= NULL
);
3223 np
->ops
->unmap_page(np
->device
, tb
->mapping
,
3224 skb_shinfo(skb
)->frags
[i
].size
,
3226 idx
= NEXT_TX(rp
, idx
);
#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
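/* Illustrative note (not part of the original source): a stopped TX queue is
 * only woken once at least a quarter of the ring is free again, adding
 * hysteresis so the queue does not bounce between stopped and awake on every
 * reclaimed descriptor.
 */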
3236 static void niu_tx_work(struct niu
*np
, struct tx_ring_info
*rp
)
3238 struct netdev_queue
*txq
;
3243 index
= (rp
- np
->tx_rings
);
3244 txq
= netdev_get_tx_queue(np
->dev
, index
);
3247 if (unlikely(!(cs
& (TX_CS_MK
| TX_CS_MMK
))))
3250 tmp
= pkt_cnt
= (cs
& TX_CS_PKT_CNT
) >> TX_CS_PKT_CNT_SHIFT
;
3251 pkt_cnt
= (pkt_cnt
- rp
->last_pkt_cnt
) &
3252 (TX_CS_PKT_CNT
>> TX_CS_PKT_CNT_SHIFT
);
3254 rp
->last_pkt_cnt
= tmp
;
3258 niudbg(TX_DONE
, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
3259 np
->dev
->name
, pkt_cnt
, cons
);
3262 cons
= release_tx_packet(np
, rp
, cons
);
3268 if (unlikely(netif_tx_queue_stopped(txq
) &&
3269 (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
)))) {
3270 __netif_tx_lock(txq
, smp_processor_id());
3271 if (netif_tx_queue_stopped(txq
) &&
3272 (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
)))
3273 netif_tx_wake_queue(txq
);
3274 __netif_tx_unlock(txq
);
3278 static int niu_rx_work(struct niu
*np
, struct rx_ring_info
*rp
, int budget
)
3280 int qlen
, rcr_done
= 0, work_done
= 0;
3281 struct rxdma_mailbox
*mbox
= rp
->mbox
;
3285 stat
= nr64(RX_DMA_CTL_STAT(rp
->rx_channel
));
3286 qlen
= nr64(RCRSTAT_A(rp
->rx_channel
)) & RCRSTAT_A_QLEN
;
3288 stat
= le64_to_cpup(&mbox
->rx_dma_ctl_stat
);
3289 qlen
= (le64_to_cpup(&mbox
->rcrstat_a
) & RCRSTAT_A_QLEN
);
3291 mbox
->rx_dma_ctl_stat
= 0;
3292 mbox
->rcrstat_a
= 0;
3294 niudbg(RX_STATUS
, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
3295 np
->dev
->name
, rp
->rx_channel
, (unsigned long long) stat
, qlen
);
3297 rcr_done
= work_done
= 0;
3298 qlen
= min(qlen
, budget
);
3299 while (work_done
< qlen
) {
3300 rcr_done
+= niu_process_rx_pkt(np
, rp
);
3304 if (rp
->rbr_refill_pending
>= rp
->rbr_kick_thresh
) {
3307 for (i
= 0; i
< rp
->rbr_refill_pending
; i
++)
3308 niu_rbr_refill(np
, rp
, GFP_ATOMIC
);
3309 rp
->rbr_refill_pending
= 0;
3312 stat
= (RX_DMA_CTL_STAT_MEX
|
3313 ((u64
)work_done
<< RX_DMA_CTL_STAT_PKTREAD_SHIFT
) |
3314 ((u64
)rcr_done
<< RX_DMA_CTL_STAT_PTRREAD_SHIFT
));
3316 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
), stat
);
3321 static int niu_poll_core(struct niu
*np
, struct niu_ldg
*lp
, int budget
)
3324 u32 tx_vec
= (v0
>> 32);
3325 u32 rx_vec
= (v0
& 0xffffffff);
3326 int i
, work_done
= 0;
3328 niudbg(INTR
, "%s: niu_poll_core() v0[%016llx]\n",
3329 np
->dev
->name
, (unsigned long long) v0
);
3331 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3332 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3333 if (tx_vec
& (1 << rp
->tx_channel
))
3334 niu_tx_work(np
, rp
);
3335 nw64(LD_IM0(LDN_TXDMA(rp
->tx_channel
)), 0);
3338 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3339 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3341 if (rx_vec
& (1 << rp
->rx_channel
)) {
3344 this_work_done
= niu_rx_work(np
, rp
,
3347 budget
-= this_work_done
;
3348 work_done
+= this_work_done
;
3350 nw64(LD_IM0(LDN_RXDMA(rp
->rx_channel
)), 0);
static int niu_poll(struct napi_struct *napi, int budget)
{
	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
	struct niu *np = lp->np;
	int work_done;

	work_done = niu_poll_core(np, lp, budget);

	if (work_done < budget) {
		netif_rx_complete(np->dev, napi);
		niu_ldg_rearm(np, lp, 1);
	}
	return work_done;
}
3371 static void niu_log_rxchan_errors(struct niu
*np
, struct rx_ring_info
*rp
,
3374 dev_err(np
->device
, PFX
"%s: RX channel %u errors ( ",
3375 np
->dev
->name
, rp
->rx_channel
);
3377 if (stat
& RX_DMA_CTL_STAT_RBR_TMOUT
)
3378 printk("RBR_TMOUT ");
3379 if (stat
& RX_DMA_CTL_STAT_RSP_CNT_ERR
)
3381 if (stat
& RX_DMA_CTL_STAT_BYTE_EN_BUS
)
3382 printk("BYTE_EN_BUS ");
3383 if (stat
& RX_DMA_CTL_STAT_RSP_DAT_ERR
)
3385 if (stat
& RX_DMA_CTL_STAT_RCR_ACK_ERR
)
3387 if (stat
& RX_DMA_CTL_STAT_RCR_SHA_PAR
)
3388 printk("RCR_SHA_PAR ");
3389 if (stat
& RX_DMA_CTL_STAT_RBR_PRE_PAR
)
3390 printk("RBR_PRE_PAR ");
3391 if (stat
& RX_DMA_CTL_STAT_CONFIG_ERR
)
3393 if (stat
& RX_DMA_CTL_STAT_RCRINCON
)
3394 printk("RCRINCON ");
3395 if (stat
& RX_DMA_CTL_STAT_RCRFULL
)
3397 if (stat
& RX_DMA_CTL_STAT_RBRFULL
)
3399 if (stat
& RX_DMA_CTL_STAT_RBRLOGPAGE
)
3400 printk("RBRLOGPAGE ");
3401 if (stat
& RX_DMA_CTL_STAT_CFIGLOGPAGE
)
3402 printk("CFIGLOGPAGE ");
3403 if (stat
& RX_DMA_CTL_STAT_DC_FIFO_ERR
)
3409 static int niu_rx_error(struct niu
*np
, struct rx_ring_info
*rp
)
3411 u64 stat
= nr64(RX_DMA_CTL_STAT(rp
->rx_channel
));
3415 if (stat
& (RX_DMA_CTL_STAT_CHAN_FATAL
|
3416 RX_DMA_CTL_STAT_PORT_FATAL
))
3420 dev_err(np
->device
, PFX
"%s: RX channel %u error, stat[%llx]\n",
3421 np
->dev
->name
, rp
->rx_channel
,
3422 (unsigned long long) stat
);
3424 niu_log_rxchan_errors(np
, rp
, stat
);
3427 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
3428 stat
& RX_DMA_CTL_WRITE_CLEAR_ERRS
);
3433 static void niu_log_txchan_errors(struct niu
*np
, struct tx_ring_info
*rp
,
3436 dev_err(np
->device
, PFX
"%s: TX channel %u errors ( ",
3437 np
->dev
->name
, rp
->tx_channel
);
3439 if (cs
& TX_CS_MBOX_ERR
)
3441 if (cs
& TX_CS_PKT_SIZE_ERR
)
3442 printk("PKT_SIZE ");
3443 if (cs
& TX_CS_TX_RING_OFLOW
)
3444 printk("TX_RING_OFLOW ");
3445 if (cs
& TX_CS_PREF_BUF_PAR_ERR
)
3446 printk("PREF_BUF_PAR ");
3447 if (cs
& TX_CS_NACK_PREF
)
3448 printk("NACK_PREF ");
3449 if (cs
& TX_CS_NACK_PKT_RD
)
3450 printk("NACK_PKT_RD ");
3451 if (cs
& TX_CS_CONF_PART_ERR
)
3452 printk("CONF_PART ");
3453 if (cs
& TX_CS_PKT_PRT_ERR
)
3459 static int niu_tx_error(struct niu
*np
, struct tx_ring_info
*rp
)
3463 cs
= nr64(TX_CS(rp
->tx_channel
));
3464 logh
= nr64(TX_RNG_ERR_LOGH(rp
->tx_channel
));
3465 logl
= nr64(TX_RNG_ERR_LOGL(rp
->tx_channel
));
3467 dev_err(np
->device
, PFX
"%s: TX channel %u error, "
3468 "cs[%llx] logh[%llx] logl[%llx]\n",
3469 np
->dev
->name
, rp
->tx_channel
,
3470 (unsigned long long) cs
,
3471 (unsigned long long) logh
,
3472 (unsigned long long) logl
);
3474 niu_log_txchan_errors(np
, rp
, cs
);
static int niu_mif_interrupt(struct niu *np)
{
	u64 mif_status = nr64(MIF_STATUS);
	int phy_mdint = 0;

	if (np->flags & NIU_FLAGS_XMAC) {
		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);

		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
			phy_mdint = 1;
	}

	dev_err(np->device, PFX "%s: MIF interrupt, "
		"stat[%llx] phy_mdint(%d)\n",
		np->dev->name, (unsigned long long) mif_status, phy_mdint);

	return -ENODEV;
}
3498 static void niu_xmac_interrupt(struct niu
*np
)
3500 struct niu_xmac_stats
*mp
= &np
->mac_stats
.xmac
;
3503 val
= nr64_mac(XTXMAC_STATUS
);
3504 if (val
& XTXMAC_STATUS_FRAME_CNT_EXP
)
3505 mp
->tx_frames
+= TXMAC_FRM_CNT_COUNT
;
3506 if (val
& XTXMAC_STATUS_BYTE_CNT_EXP
)
3507 mp
->tx_bytes
+= TXMAC_BYTE_CNT_COUNT
;
3508 if (val
& XTXMAC_STATUS_TXFIFO_XFR_ERR
)
3509 mp
->tx_fifo_errors
++;
3510 if (val
& XTXMAC_STATUS_TXMAC_OFLOW
)
3511 mp
->tx_overflow_errors
++;
3512 if (val
& XTXMAC_STATUS_MAX_PSIZE_ERR
)
3513 mp
->tx_max_pkt_size_errors
++;
3514 if (val
& XTXMAC_STATUS_TXMAC_UFLOW
)
3515 mp
->tx_underflow_errors
++;
3517 val
= nr64_mac(XRXMAC_STATUS
);
3518 if (val
& XRXMAC_STATUS_LCL_FLT_STATUS
)
3519 mp
->rx_local_faults
++;
3520 if (val
& XRXMAC_STATUS_RFLT_DET
)
3521 mp
->rx_remote_faults
++;
3522 if (val
& XRXMAC_STATUS_LFLT_CNT_EXP
)
3523 mp
->rx_link_faults
+= LINK_FAULT_CNT_COUNT
;
3524 if (val
& XRXMAC_STATUS_ALIGNERR_CNT_EXP
)
3525 mp
->rx_align_errors
+= RXMAC_ALIGN_ERR_CNT_COUNT
;
3526 if (val
& XRXMAC_STATUS_RXFRAG_CNT_EXP
)
3527 mp
->rx_frags
+= RXMAC_FRAG_CNT_COUNT
;
3528 if (val
& XRXMAC_STATUS_RXMULTF_CNT_EXP
)
3529 mp
->rx_mcasts
+= RXMAC_MC_FRM_CNT_COUNT
;
3530 if (val
& XRXMAC_STATUS_RXBCAST_CNT_EXP
)
3531 mp
->rx_bcasts
+= RXMAC_BC_FRM_CNT_COUNT
;
3532 if (val
& XRXMAC_STATUS_RXBCAST_CNT_EXP
)
3533 mp
->rx_bcasts
+= RXMAC_BC_FRM_CNT_COUNT
;
3534 if (val
& XRXMAC_STATUS_RXHIST1_CNT_EXP
)
3535 mp
->rx_hist_cnt1
+= RXMAC_HIST_CNT1_COUNT
;
3536 if (val
& XRXMAC_STATUS_RXHIST2_CNT_EXP
)
3537 mp
->rx_hist_cnt2
+= RXMAC_HIST_CNT2_COUNT
;
3538 if (val
& XRXMAC_STATUS_RXHIST3_CNT_EXP
)
3539 mp
->rx_hist_cnt3
+= RXMAC_HIST_CNT3_COUNT
;
3540 if (val
& XRXMAC_STATUS_RXHIST4_CNT_EXP
)
3541 mp
->rx_hist_cnt4
+= RXMAC_HIST_CNT4_COUNT
;
3542 if (val
& XRXMAC_STATUS_RXHIST5_CNT_EXP
)
3543 mp
->rx_hist_cnt5
+= RXMAC_HIST_CNT5_COUNT
;
3544 if (val
& XRXMAC_STATUS_RXHIST6_CNT_EXP
)
3545 mp
->rx_hist_cnt6
+= RXMAC_HIST_CNT6_COUNT
;
3546 if (val
& XRXMAC_STATUS_RXHIST7_CNT_EXP
)
3547 mp
->rx_hist_cnt7
+= RXMAC_HIST_CNT7_COUNT
;
3548 if (val
& XRXMAC_STAT_MSK_RXOCTET_CNT_EXP
)
3549 mp
->rx_octets
+= RXMAC_BT_CNT_COUNT
;
3550 if (val
& XRXMAC_STATUS_CVIOLERR_CNT_EXP
)
3551 mp
->rx_code_violations
+= RXMAC_CD_VIO_CNT_COUNT
;
3552 if (val
& XRXMAC_STATUS_LENERR_CNT_EXP
)
3553 mp
->rx_len_errors
+= RXMAC_MPSZER_CNT_COUNT
;
3554 if (val
& XRXMAC_STATUS_CRCERR_CNT_EXP
)
3555 mp
->rx_crc_errors
+= RXMAC_CRC_ER_CNT_COUNT
;
3556 if (val
& XRXMAC_STATUS_RXUFLOW
)
3557 mp
->rx_underflows
++;
3558 if (val
& XRXMAC_STATUS_RXOFLOW
)
3561 val
= nr64_mac(XMAC_FC_STAT
);
3562 if (val
& XMAC_FC_STAT_TX_MAC_NPAUSE
)
3563 mp
->pause_off_state
++;
3564 if (val
& XMAC_FC_STAT_TX_MAC_PAUSE
)
3565 mp
->pause_on_state
++;
3566 if (val
& XMAC_FC_STAT_RX_MAC_RPAUSE
)
3567 mp
->pause_received
++;
3570 static void niu_bmac_interrupt(struct niu
*np
)
3572 struct niu_bmac_stats
*mp
= &np
->mac_stats
.bmac
;
3575 val
= nr64_mac(BTXMAC_STATUS
);
3576 if (val
& BTXMAC_STATUS_UNDERRUN
)
3577 mp
->tx_underflow_errors
++;
3578 if (val
& BTXMAC_STATUS_MAX_PKT_ERR
)
3579 mp
->tx_max_pkt_size_errors
++;
3580 if (val
& BTXMAC_STATUS_BYTE_CNT_EXP
)
3581 mp
->tx_bytes
+= BTXMAC_BYTE_CNT_COUNT
;
3582 if (val
& BTXMAC_STATUS_FRAME_CNT_EXP
)
3583 mp
->tx_frames
+= BTXMAC_FRM_CNT_COUNT
;
3585 val
= nr64_mac(BRXMAC_STATUS
);
3586 if (val
& BRXMAC_STATUS_OVERFLOW
)
3588 if (val
& BRXMAC_STATUS_FRAME_CNT_EXP
)
3589 mp
->rx_frames
+= BRXMAC_FRAME_CNT_COUNT
;
3590 if (val
& BRXMAC_STATUS_ALIGN_ERR_EXP
)
3591 mp
->rx_align_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
3592 if (val
& BRXMAC_STATUS_CRC_ERR_EXP
)
3593 mp
->rx_crc_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
3594 if (val
& BRXMAC_STATUS_LEN_ERR_EXP
)
3595 mp
->rx_len_errors
+= BRXMAC_CODE_VIOL_ERR_CNT_COUNT
;
3597 val
= nr64_mac(BMAC_CTRL_STATUS
);
3598 if (val
& BMAC_CTRL_STATUS_NOPAUSE
)
3599 mp
->pause_off_state
++;
3600 if (val
& BMAC_CTRL_STATUS_PAUSE
)
3601 mp
->pause_on_state
++;
3602 if (val
& BMAC_CTRL_STATUS_PAUSE_RECV
)
3603 mp
->pause_received
++;
static int niu_mac_interrupt(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_xmac_interrupt(np);
	else
		niu_bmac_interrupt(np);

	return 0;
}
3616 static void niu_log_device_error(struct niu
*np
, u64 stat
)
3618 dev_err(np
->device
, PFX
"%s: Core device errors ( ",
3621 if (stat
& SYS_ERR_MASK_META2
)
3623 if (stat
& SYS_ERR_MASK_META1
)
3625 if (stat
& SYS_ERR_MASK_PEU
)
3627 if (stat
& SYS_ERR_MASK_TXC
)
3629 if (stat
& SYS_ERR_MASK_RDMC
)
3631 if (stat
& SYS_ERR_MASK_TDMC
)
3633 if (stat
& SYS_ERR_MASK_ZCP
)
3635 if (stat
& SYS_ERR_MASK_FFLP
)
3637 if (stat
& SYS_ERR_MASK_IPP
)
3639 if (stat
& SYS_ERR_MASK_MAC
)
3641 if (stat
& SYS_ERR_MASK_SMX
)
3647 static int niu_device_error(struct niu
*np
)
3649 u64 stat
= nr64(SYS_ERR_STAT
);
3651 dev_err(np
->device
, PFX
"%s: Core device error, stat[%llx]\n",
3652 np
->dev
->name
, (unsigned long long) stat
);
3654 niu_log_device_error(np
, stat
);
3659 static int niu_slowpath_interrupt(struct niu
*np
, struct niu_ldg
*lp
,
3660 u64 v0
, u64 v1
, u64 v2
)
3669 if (v1
& 0x00000000ffffffffULL
) {
3670 u32 rx_vec
= (v1
& 0xffffffff);
3672 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3673 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3675 if (rx_vec
& (1 << rp
->rx_channel
)) {
3676 int r
= niu_rx_error(np
, rp
);
3681 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
3682 RX_DMA_CTL_STAT_MEX
);
3687 if (v1
& 0x7fffffff00000000ULL
) {
3688 u32 tx_vec
= (v1
>> 32) & 0x7fffffff;
3690 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3691 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3693 if (tx_vec
& (1 << rp
->tx_channel
)) {
3694 int r
= niu_tx_error(np
, rp
);
3700 if ((v0
| v1
) & 0x8000000000000000ULL
) {
3701 int r
= niu_mif_interrupt(np
);
3707 int r
= niu_mac_interrupt(np
);
3712 int r
= niu_device_error(np
);
3719 niu_enable_interrupts(np
, 0);
3724 static void niu_rxchan_intr(struct niu
*np
, struct rx_ring_info
*rp
,
3727 struct rxdma_mailbox
*mbox
= rp
->mbox
;
3728 u64 stat_write
, stat
= le64_to_cpup(&mbox
->rx_dma_ctl_stat
);
3730 stat_write
= (RX_DMA_CTL_STAT_RCRTHRES
|
3731 RX_DMA_CTL_STAT_RCRTO
);
3732 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
), stat_write
);
3734 niudbg(INTR
, "%s: rxchan_intr stat[%llx]\n",
3735 np
->dev
->name
, (unsigned long long) stat
);
3738 static void niu_txchan_intr(struct niu
*np
, struct tx_ring_info
*rp
,
3741 rp
->tx_cs
= nr64(TX_CS(rp
->tx_channel
));
3743 niudbg(INTR
, "%s: txchan_intr cs[%llx]\n",
3744 np
->dev
->name
, (unsigned long long) rp
->tx_cs
);
3747 static void __niu_fastpath_interrupt(struct niu
*np
, int ldg
, u64 v0
)
3749 struct niu_parent
*parent
= np
->parent
;
3753 tx_vec
= (v0
>> 32);
3754 rx_vec
= (v0
& 0xffffffff);
3756 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3757 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3758 int ldn
= LDN_RXDMA(rp
->rx_channel
);
3760 if (parent
->ldg_map
[ldn
] != ldg
)
3763 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
3764 if (rx_vec
& (1 << rp
->rx_channel
))
3765 niu_rxchan_intr(np
, rp
, ldn
);
3768 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3769 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3770 int ldn
= LDN_TXDMA(rp
->tx_channel
);
3772 if (parent
->ldg_map
[ldn
] != ldg
)
3775 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
3776 if (tx_vec
& (1 << rp
->tx_channel
))
3777 niu_txchan_intr(np
, rp
, ldn
);
3781 static void niu_schedule_napi(struct niu
*np
, struct niu_ldg
*lp
,
3782 u64 v0
, u64 v1
, u64 v2
)
3784 if (likely(netif_rx_schedule_prep(np
->dev
, &lp
->napi
))) {
3788 __niu_fastpath_interrupt(np
, lp
->ldg_num
, v0
);
3789 __netif_rx_schedule(np
->dev
, &lp
->napi
);
static irqreturn_t niu_interrupt(int irq, void *dev_id)
{
	struct niu_ldg *lp = dev_id;
	struct niu *np = lp->np;
	int ldg = lp->ldg_num;
	unsigned long flags;
	u64 v0, v1, v2;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
		       lp, ldg);

	spin_lock_irqsave(&np->lock, flags);

	v0 = nr64(LDSV0(ldg));
	v1 = nr64(LDSV1(ldg));
	v2 = nr64(LDSV2(ldg));

	if (netif_msg_intr(np))
		printk("v0[%llx] v1[%llx] v2[%llx]\n",
		       (unsigned long long) v0,
		       (unsigned long long) v1,
		       (unsigned long long) v2);

	if (unlikely(!v0 && !v1 && !v2)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return IRQ_NONE;
	}

	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
		if (err)
			goto out;
	}
	if (likely(v0 & ~((u64)1 << LDN_MIF)))
		niu_schedule_napi(np, lp, v0, v1, v2);
	else
		niu_ldg_rearm(np, lp, 1);
out:
	spin_unlock_irqrestore(&np->lock, flags);

	return IRQ_HANDLED;
}
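/* Illustrative note (not part of the original source): LDSV0 carries the
 * per-channel fast-path bits (TX channels in the upper 32 bits, RX channels
 * in the lower 32), while LDSV1/LDSV2 and the MIF bit report error and
 * device conditions.  The handler masks the fast-path sources, defers their
 * processing to NAPI, and runs the slow path inline under np->lock.
 */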
3837 static void niu_free_rx_ring_info(struct niu
*np
, struct rx_ring_info
*rp
)
3840 np
->ops
->free_coherent(np
->device
,
3841 sizeof(struct rxdma_mailbox
),
3842 rp
->mbox
, rp
->mbox_dma
);
3846 np
->ops
->free_coherent(np
->device
,
3847 MAX_RCR_RING_SIZE
* sizeof(__le64
),
3848 rp
->rcr
, rp
->rcr_dma
);
3850 rp
->rcr_table_size
= 0;
3854 niu_rbr_free(np
, rp
);
3856 np
->ops
->free_coherent(np
->device
,
3857 MAX_RBR_RING_SIZE
* sizeof(__le32
),
3858 rp
->rbr
, rp
->rbr_dma
);
3860 rp
->rbr_table_size
= 0;
3867 static void niu_free_tx_ring_info(struct niu
*np
, struct tx_ring_info
*rp
)
3870 np
->ops
->free_coherent(np
->device
,
3871 sizeof(struct txdma_mailbox
),
3872 rp
->mbox
, rp
->mbox_dma
);
3878 for (i
= 0; i
< MAX_TX_RING_SIZE
; i
++) {
3879 if (rp
->tx_buffs
[i
].skb
)
3880 (void) release_tx_packet(np
, rp
, i
);
3883 np
->ops
->free_coherent(np
->device
,
3884 MAX_TX_RING_SIZE
* sizeof(__le64
),
3885 rp
->descr
, rp
->descr_dma
);
3894 static void niu_free_channels(struct niu
*np
)
3899 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3900 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3902 niu_free_rx_ring_info(np
, rp
);
3904 kfree(np
->rx_rings
);
3905 np
->rx_rings
= NULL
;
3906 np
->num_rx_rings
= 0;
3910 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3911 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3913 niu_free_tx_ring_info(np
, rp
);
3915 kfree(np
->tx_rings
);
3916 np
->tx_rings
= NULL
;
3917 np
->num_tx_rings
= 0;
3921 static int niu_alloc_rx_ring_info(struct niu
*np
,
3922 struct rx_ring_info
*rp
)
3924 BUILD_BUG_ON(sizeof(struct rxdma_mailbox
) != 64);
3926 rp
->rxhash
= kzalloc(MAX_RBR_RING_SIZE
* sizeof(struct page
*),
3931 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
3932 sizeof(struct rxdma_mailbox
),
3933 &rp
->mbox_dma
, GFP_KERNEL
);
3936 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
3937 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
3938 "RXDMA mailbox %p\n", np
->dev
->name
, rp
->mbox
);
3942 rp
->rcr
= np
->ops
->alloc_coherent(np
->device
,
3943 MAX_RCR_RING_SIZE
* sizeof(__le64
),
3944 &rp
->rcr_dma
, GFP_KERNEL
);
3947 if ((unsigned long)rp
->rcr
& (64UL - 1)) {
3948 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
3949 "RXDMA RCR table %p\n", np
->dev
->name
, rp
->rcr
);
3952 rp
->rcr_table_size
= MAX_RCR_RING_SIZE
;
3955 rp
->rbr
= np
->ops
->alloc_coherent(np
->device
,
3956 MAX_RBR_RING_SIZE
* sizeof(__le32
),
3957 &rp
->rbr_dma
, GFP_KERNEL
);
3960 if ((unsigned long)rp
->rbr
& (64UL - 1)) {
3961 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
3962 "RXDMA RBR table %p\n", np
->dev
->name
, rp
->rbr
);
3965 rp
->rbr_table_size
= MAX_RBR_RING_SIZE
;
3967 rp
->rbr_pending
= 0;
static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
{
	int mtu = np->dev->mtu;

	/* These values are recommended by the HW designers for fair
	 * utilization of DRR amongst the rings.
	 */
	rp->max_burst = mtu + 32;
	if (rp->max_burst > 4096)
		rp->max_burst = 4096;
}
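/* Worked example (illustrative, not from the original source): a standard
 * 1500-byte MTU gives a per-ring DRR burst of 1532 bytes, while a 9000-byte
 * jumbo MTU would be clamped to the 4096-byte ceiling above.
 */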
3984 static int niu_alloc_tx_ring_info(struct niu
*np
,
3985 struct tx_ring_info
*rp
)
3987 BUILD_BUG_ON(sizeof(struct txdma_mailbox
) != 64);
3989 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
3990 sizeof(struct txdma_mailbox
),
3991 &rp
->mbox_dma
, GFP_KERNEL
);
3994 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
3995 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
3996 "TXDMA mailbox %p\n", np
->dev
->name
, rp
->mbox
);
4000 rp
->descr
= np
->ops
->alloc_coherent(np
->device
,
4001 MAX_TX_RING_SIZE
* sizeof(__le64
),
4002 &rp
->descr_dma
, GFP_KERNEL
);
4005 if ((unsigned long)rp
->descr
& (64UL - 1)) {
4006 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
4007 "TXDMA descr table %p\n", np
->dev
->name
, rp
->descr
);
4011 rp
->pending
= MAX_TX_RING_SIZE
;
4016 /* XXX make these configurable... XXX */
4017 rp
->mark_freq
= rp
->pending
/ 4;
4019 niu_set_max_burst(np
, rp
);
4024 static void niu_size_rbr(struct niu
*np
, struct rx_ring_info
*rp
)
4028 bss
= min(PAGE_SHIFT
, 15);
4030 rp
->rbr_block_size
= 1 << bss
;
4031 rp
->rbr_blocks_per_page
= 1 << (PAGE_SHIFT
-bss
);
4033 rp
->rbr_sizes
[0] = 256;
4034 rp
->rbr_sizes
[1] = 1024;
4035 if (np
->dev
->mtu
> ETH_DATA_LEN
) {
4036 switch (PAGE_SIZE
) {
4038 rp
->rbr_sizes
[2] = 4096;
4042 rp
->rbr_sizes
[2] = 8192;
4046 rp
->rbr_sizes
[2] = 2048;
4048 rp
->rbr_sizes
[3] = rp
->rbr_block_size
;
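/* Illustrative note (not part of the original source): bss is the log2 of
 * the RBR block size, capped at 15 (32KB).  On a 4KB-page system that gives
 * a 4KB block and rbr_blocks_per_page = 1; a hypothetical 64KB page would
 * still use the 32KB maximum block and carve each page into two blocks.
 */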
4051 static int niu_alloc_channels(struct niu
*np
)
4053 struct niu_parent
*parent
= np
->parent
;
4054 int first_rx_channel
, first_tx_channel
;
4058 first_rx_channel
= first_tx_channel
= 0;
4059 for (i
= 0; i
< port
; i
++) {
4060 first_rx_channel
+= parent
->rxchan_per_port
[i
];
4061 first_tx_channel
+= parent
->txchan_per_port
[i
];
4064 np
->num_rx_rings
= parent
->rxchan_per_port
[port
];
4065 np
->num_tx_rings
= parent
->txchan_per_port
[port
];
4067 np
->dev
->real_num_tx_queues
= np
->num_tx_rings
;
4069 np
->rx_rings
= kzalloc(np
->num_rx_rings
* sizeof(struct rx_ring_info
),
4075 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4076 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4079 rp
->rx_channel
= first_rx_channel
+ i
;
4081 err
= niu_alloc_rx_ring_info(np
, rp
);
4085 niu_size_rbr(np
, rp
);
4087 /* XXX better defaults, configurable, etc... XXX */
4088 rp
->nonsyn_window
= 64;
4089 rp
->nonsyn_threshold
= rp
->rcr_table_size
- 64;
4090 rp
->syn_window
= 64;
4091 rp
->syn_threshold
= rp
->rcr_table_size
- 64;
4092 rp
->rcr_pkt_threshold
= 16;
4093 rp
->rcr_timeout
= 8;
4094 rp
->rbr_kick_thresh
= RBR_REFILL_MIN
;
4095 if (rp
->rbr_kick_thresh
< rp
->rbr_blocks_per_page
)
4096 rp
->rbr_kick_thresh
= rp
->rbr_blocks_per_page
;
4098 err
= niu_rbr_fill(np
, rp
, GFP_KERNEL
);
4103 np
->tx_rings
= kzalloc(np
->num_tx_rings
* sizeof(struct tx_ring_info
),
4109 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4110 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4113 rp
->tx_channel
= first_tx_channel
+ i
;
4115 err
= niu_alloc_tx_ring_info(np
, rp
);
4123 niu_free_channels(np
);
4127 static int niu_tx_cs_sng_poll(struct niu
*np
, int channel
)
4131 while (--limit
> 0) {
4132 u64 val
= nr64(TX_CS(channel
));
4133 if (val
& TX_CS_SNG_STATE
)
4139 static int niu_tx_channel_stop(struct niu
*np
, int channel
)
4141 u64 val
= nr64(TX_CS(channel
));
4143 val
|= TX_CS_STOP_N_GO
;
4144 nw64(TX_CS(channel
), val
);
4146 return niu_tx_cs_sng_poll(np
, channel
);
4149 static int niu_tx_cs_reset_poll(struct niu
*np
, int channel
)
4153 while (--limit
> 0) {
4154 u64 val
= nr64(TX_CS(channel
));
4155 if (!(val
& TX_CS_RST
))
4161 static int niu_tx_channel_reset(struct niu
*np
, int channel
)
4163 u64 val
= nr64(TX_CS(channel
));
4167 nw64(TX_CS(channel
), val
);
4169 err
= niu_tx_cs_reset_poll(np
, channel
);
4171 nw64(TX_RING_KICK(channel
), 0);
4176 static int niu_tx_channel_lpage_init(struct niu
*np
, int channel
)
4180 nw64(TX_LOG_MASK1(channel
), 0);
4181 nw64(TX_LOG_VAL1(channel
), 0);
4182 nw64(TX_LOG_MASK2(channel
), 0);
4183 nw64(TX_LOG_VAL2(channel
), 0);
4184 nw64(TX_LOG_PAGE_RELO1(channel
), 0);
4185 nw64(TX_LOG_PAGE_RELO2(channel
), 0);
4186 nw64(TX_LOG_PAGE_HDL(channel
), 0);
4188 val
= (u64
)np
->port
<< TX_LOG_PAGE_VLD_FUNC_SHIFT
;
4189 val
|= (TX_LOG_PAGE_VLD_PAGE0
| TX_LOG_PAGE_VLD_PAGE1
);
4190 nw64(TX_LOG_PAGE_VLD(channel
), val
);
4192 /* XXX TXDMA 32bit mode? XXX */
4197 static void niu_txc_enable_port(struct niu
*np
, int on
)
4199 unsigned long flags
;
4202 niu_lock_parent(np
, flags
);
4203 val
= nr64(TXC_CONTROL
);
4204 mask
= (u64
)1 << np
->port
;
4206 val
|= TXC_CONTROL_ENABLE
| mask
;
4209 if ((val
& ~TXC_CONTROL_ENABLE
) == 0)
4210 val
&= ~TXC_CONTROL_ENABLE
;
4212 nw64(TXC_CONTROL
, val
);
4213 niu_unlock_parent(np
, flags
);
4216 static void niu_txc_set_imask(struct niu
*np
, u64 imask
)
4218 unsigned long flags
;
4221 niu_lock_parent(np
, flags
);
4222 val
= nr64(TXC_INT_MASK
);
4223 val
&= ~TXC_INT_MASK_VAL(np
->port
);
4224 val
|= (imask
<< TXC_INT_MASK_VAL_SHIFT(np
->port
));
4225 niu_unlock_parent(np
, flags
);
4228 static void niu_txc_port_dma_enable(struct niu
*np
, int on
)
4235 for (i
= 0; i
< np
->num_tx_rings
; i
++)
4236 val
|= (1 << np
->tx_rings
[i
].tx_channel
);
4238 nw64(TXC_PORT_DMA(np
->port
), val
);
4241 static int niu_init_one_tx_channel(struct niu
*np
, struct tx_ring_info
*rp
)
4243 int err
, channel
= rp
->tx_channel
;
4246 err
= niu_tx_channel_stop(np
, channel
);
4250 err
= niu_tx_channel_reset(np
, channel
);
4254 err
= niu_tx_channel_lpage_init(np
, channel
);
4258 nw64(TXC_DMA_MAX(channel
), rp
->max_burst
);
4259 nw64(TX_ENT_MSK(channel
), 0);
4261 if (rp
->descr_dma
& ~(TX_RNG_CFIG_STADDR_BASE
|
4262 TX_RNG_CFIG_STADDR
)) {
4263 dev_err(np
->device
, PFX
"%s: TX ring channel %d "
4264 "DMA addr (%llx) is not aligned.\n",
4265 np
->dev
->name
, channel
,
4266 (unsigned long long) rp
->descr_dma
);
	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks.  rp->pending is the number of TX descriptors in
	 * our ring, 8 bytes each, thus we divide by 8 more
	 * to get the proper value the chip wants.
	 */
	ring_len = (rp->pending / 8);
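	/* Worked example (illustrative, not from the original source):
	 * with an assumed MAX_TX_RING_SIZE of 256 descriptors the ring
	 * occupies 256 * 8 = 2048 bytes, i.e. 2048 / 64 = 32 blocks,
	 * and indeed 256 / 8 = 32 is the value programmed here.
	 */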
4277 val
= ((ring_len
<< TX_RNG_CFIG_LEN_SHIFT
) |
4279 nw64(TX_RNG_CFIG(channel
), val
);
4281 if (((rp
->mbox_dma
>> 32) & ~TXDMA_MBH_MBADDR
) ||
4282 ((u32
)rp
->mbox_dma
& ~TXDMA_MBL_MBADDR
)) {
4283 dev_err(np
->device
, PFX
"%s: TX ring channel %d "
4284 "MBOX addr (%llx) is has illegal bits.\n",
4285 np
->dev
->name
, channel
,
4286 (unsigned long long) rp
->mbox_dma
);
4289 nw64(TXDMA_MBH(channel
), rp
->mbox_dma
>> 32);
4290 nw64(TXDMA_MBL(channel
), rp
->mbox_dma
& TXDMA_MBL_MBADDR
);
4292 nw64(TX_CS(channel
), 0);
4294 rp
->last_pkt_cnt
= 0;
4299 static void niu_init_rdc_groups(struct niu
*np
)
4301 struct niu_rdc_tables
*tp
= &np
->parent
->rdc_group_cfg
[np
->port
];
4302 int i
, first_table_num
= tp
->first_table_num
;
4304 for (i
= 0; i
< tp
->num_tables
; i
++) {
4305 struct rdc_table
*tbl
= &tp
->tables
[i
];
4306 int this_table
= first_table_num
+ i
;
4309 for (slot
= 0; slot
< NIU_RDC_TABLE_SLOTS
; slot
++)
4310 nw64(RDC_TBL(this_table
, slot
),
4311 tbl
->rxdma_channel
[slot
]);
4314 nw64(DEF_RDC(np
->port
), np
->parent
->rdc_default
[np
->port
]);
4317 static void niu_init_drr_weight(struct niu
*np
)
4319 int type
= phy_decode(np
->parent
->port_phy
, np
->port
);
4324 val
= PT_DRR_WEIGHT_DEFAULT_10G
;
4329 val
= PT_DRR_WEIGHT_DEFAULT_1G
;
4332 nw64(PT_DRR_WT(np
->port
), val
);
4335 static int niu_init_hostinfo(struct niu
*np
)
4337 struct niu_parent
*parent
= np
->parent
;
4338 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[np
->port
];
4339 int i
, err
, num_alt
= niu_num_alt_addr(np
);
4340 int first_rdc_table
= tp
->first_table_num
;
4342 err
= niu_set_primary_mac_rdc_table(np
, first_rdc_table
, 1);
4346 err
= niu_set_multicast_mac_rdc_table(np
, first_rdc_table
, 1);
4350 for (i
= 0; i
< num_alt
; i
++) {
4351 err
= niu_set_alt_mac_rdc_table(np
, i
, first_rdc_table
, 1);
4359 static int niu_rx_channel_reset(struct niu
*np
, int channel
)
4361 return niu_set_and_wait_clear(np
, RXDMA_CFIG1(channel
),
4362 RXDMA_CFIG1_RST
, 1000, 10,
4366 static int niu_rx_channel_lpage_init(struct niu
*np
, int channel
)
4370 nw64(RX_LOG_MASK1(channel
), 0);
4371 nw64(RX_LOG_VAL1(channel
), 0);
4372 nw64(RX_LOG_MASK2(channel
), 0);
4373 nw64(RX_LOG_VAL2(channel
), 0);
4374 nw64(RX_LOG_PAGE_RELO1(channel
), 0);
4375 nw64(RX_LOG_PAGE_RELO2(channel
), 0);
4376 nw64(RX_LOG_PAGE_HDL(channel
), 0);
4378 val
= (u64
)np
->port
<< RX_LOG_PAGE_VLD_FUNC_SHIFT
;
4379 val
|= (RX_LOG_PAGE_VLD_PAGE0
| RX_LOG_PAGE_VLD_PAGE1
);
4380 nw64(RX_LOG_PAGE_VLD(channel
), val
);
4385 static void niu_rx_channel_wred_init(struct niu
*np
, struct rx_ring_info
*rp
)
4389 val
= (((u64
)rp
->nonsyn_window
<< RDC_RED_PARA_WIN_SHIFT
) |
4390 ((u64
)rp
->nonsyn_threshold
<< RDC_RED_PARA_THRE_SHIFT
) |
4391 ((u64
)rp
->syn_window
<< RDC_RED_PARA_WIN_SYN_SHIFT
) |
4392 ((u64
)rp
->syn_threshold
<< RDC_RED_PARA_THRE_SYN_SHIFT
));
4393 nw64(RDC_RED_PARA(rp
->rx_channel
), val
);
4396 static int niu_compute_rbr_cfig_b(struct rx_ring_info
*rp
, u64
*ret
)
4400 switch (rp
->rbr_block_size
) {
4402 val
|= (RBR_BLKSIZE_4K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4405 val
|= (RBR_BLKSIZE_8K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4408 val
|= (RBR_BLKSIZE_16K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4411 val
|= (RBR_BLKSIZE_32K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4416 val
|= RBR_CFIG_B_VLD2
;
4417 switch (rp
->rbr_sizes
[2]) {
4419 val
|= (RBR_BUFSZ2_2K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4422 val
|= (RBR_BUFSZ2_4K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4425 val
|= (RBR_BUFSZ2_8K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4428 val
|= (RBR_BUFSZ2_16K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4434 val
|= RBR_CFIG_B_VLD1
;
4435 switch (rp
->rbr_sizes
[1]) {
4437 val
|= (RBR_BUFSZ1_1K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4440 val
|= (RBR_BUFSZ1_2K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4443 val
|= (RBR_BUFSZ1_4K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4446 val
|= (RBR_BUFSZ1_8K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4452 val
|= RBR_CFIG_B_VLD0
;
4453 switch (rp
->rbr_sizes
[0]) {
4455 val
|= (RBR_BUFSZ0_256
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4458 val
|= (RBR_BUFSZ0_512
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4461 val
|= (RBR_BUFSZ0_1K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4464 val
|= (RBR_BUFSZ0_2K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4475 static int niu_enable_rx_channel(struct niu
*np
, int channel
, int on
)
4477 u64 val
= nr64(RXDMA_CFIG1(channel
));
4481 val
|= RXDMA_CFIG1_EN
;
4483 val
&= ~RXDMA_CFIG1_EN
;
4484 nw64(RXDMA_CFIG1(channel
), val
);
4487 while (--limit
> 0) {
4488 if (nr64(RXDMA_CFIG1(channel
)) & RXDMA_CFIG1_QST
)
4497 static int niu_init_one_rx_channel(struct niu
*np
, struct rx_ring_info
*rp
)
4499 int err
, channel
= rp
->rx_channel
;
4502 err
= niu_rx_channel_reset(np
, channel
);
4506 err
= niu_rx_channel_lpage_init(np
, channel
);
4510 niu_rx_channel_wred_init(np
, rp
);
4512 nw64(RX_DMA_ENT_MSK(channel
), RX_DMA_ENT_MSK_RBR_EMPTY
);
4513 nw64(RX_DMA_CTL_STAT(channel
),
4514 (RX_DMA_CTL_STAT_MEX
|
4515 RX_DMA_CTL_STAT_RCRTHRES
|
4516 RX_DMA_CTL_STAT_RCRTO
|
4517 RX_DMA_CTL_STAT_RBR_EMPTY
));
4518 nw64(RXDMA_CFIG1(channel
), rp
->mbox_dma
>> 32);
4519 nw64(RXDMA_CFIG2(channel
), (rp
->mbox_dma
& 0x00000000ffffffc0));
4520 nw64(RBR_CFIG_A(channel
),
4521 ((u64
)rp
->rbr_table_size
<< RBR_CFIG_A_LEN_SHIFT
) |
4522 (rp
->rbr_dma
& (RBR_CFIG_A_STADDR_BASE
| RBR_CFIG_A_STADDR
)));
4523 err
= niu_compute_rbr_cfig_b(rp
, &val
);
4526 nw64(RBR_CFIG_B(channel
), val
);
4527 nw64(RCRCFIG_A(channel
),
4528 ((u64
)rp
->rcr_table_size
<< RCRCFIG_A_LEN_SHIFT
) |
4529 (rp
->rcr_dma
& (RCRCFIG_A_STADDR_BASE
| RCRCFIG_A_STADDR
)));
4530 nw64(RCRCFIG_B(channel
),
4531 ((u64
)rp
->rcr_pkt_threshold
<< RCRCFIG_B_PTHRES_SHIFT
) |
4533 ((u64
)rp
->rcr_timeout
<< RCRCFIG_B_TIMEOUT_SHIFT
));
4535 err
= niu_enable_rx_channel(np
, channel
, 1);
4539 nw64(RBR_KICK(channel
), rp
->rbr_index
);
4541 val
= nr64(RX_DMA_CTL_STAT(channel
));
4542 val
|= RX_DMA_CTL_STAT_RBR_EMPTY
;
4543 nw64(RX_DMA_CTL_STAT(channel
), val
);
4548 static int niu_init_rx_channels(struct niu
*np
)
4550 unsigned long flags
;
4551 u64 seed
= jiffies_64
;
4554 niu_lock_parent(np
, flags
);
4555 nw64(RX_DMA_CK_DIV
, np
->parent
->rxdma_clock_divider
);
4556 nw64(RED_RAN_INIT
, RED_RAN_INIT_OPMODE
| (seed
& RED_RAN_INIT_VAL
));
4557 niu_unlock_parent(np
, flags
);
4559 /* XXX RXDMA 32bit mode? XXX */
4561 niu_init_rdc_groups(np
);
4562 niu_init_drr_weight(np
);
4564 err
= niu_init_hostinfo(np
);
4568 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4569 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4571 err
= niu_init_one_rx_channel(np
, rp
);
4579 static int niu_set_ip_frag_rule(struct niu
*np
)
4581 struct niu_parent
*parent
= np
->parent
;
4582 struct niu_classifier
*cp
= &np
->clas
;
4583 struct niu_tcam_entry
*tp
;
4586 /* XXX fix this allocation scheme XXX */
4587 index
= cp
->tcam_index
;
4588 tp
= &parent
->tcam
[index
];
4590 /* Note that the noport bit is the same in both ipv4 and
4591 * ipv6 format TCAM entries.
4593 memset(tp
, 0, sizeof(*tp
));
4594 tp
->key
[1] = TCAM_V4KEY1_NOPORT
;
4595 tp
->key_mask
[1] = TCAM_V4KEY1_NOPORT
;
4596 tp
->assoc_data
= (TCAM_ASSOCDATA_TRES_USE_OFFSET
|
4597 ((u64
)0 << TCAM_ASSOCDATA_OFFSET_SHIFT
));
4598 err
= tcam_write(np
, index
, tp
->key
, tp
->key_mask
);
4601 err
= tcam_assoc_write(np
, index
, tp
->assoc_data
);
4608 static int niu_init_classifier_hw(struct niu
*np
)
4610 struct niu_parent
*parent
= np
->parent
;
4611 struct niu_classifier
*cp
= &np
->clas
;
4614 nw64(H1POLY
, cp
->h1_init
);
4615 nw64(H2POLY
, cp
->h2_init
);
4617 err
= niu_init_hostinfo(np
);
4621 for (i
= 0; i
< ENET_VLAN_TBL_NUM_ENTRIES
; i
++) {
4622 struct niu_vlan_rdc
*vp
= &cp
->vlan_mappings
[i
];
4624 vlan_tbl_write(np
, i
, np
->port
,
4625 vp
->vlan_pref
, vp
->rdc_num
);
4628 for (i
= 0; i
< cp
->num_alt_mac_mappings
; i
++) {
4629 struct niu_altmac_rdc
*ap
= &cp
->alt_mac_mappings
[i
];
4631 err
= niu_set_alt_mac_rdc_table(np
, ap
->alt_mac_num
,
4632 ap
->rdc_num
, ap
->mac_pref
);
4637 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_SCTP_IPV6
; i
++) {
4638 int index
= i
- CLASS_CODE_USER_PROG1
;
4640 err
= niu_set_tcam_key(np
, i
, parent
->tcam_key
[index
]);
4643 err
= niu_set_flow_key(np
, i
, parent
->flow_key
[index
]);
4648 err
= niu_set_ip_frag_rule(np
);
4657 static int niu_zcp_write(struct niu
*np
, int index
, u64
*data
)
4659 nw64(ZCP_RAM_DATA0
, data
[0]);
4660 nw64(ZCP_RAM_DATA1
, data
[1]);
4661 nw64(ZCP_RAM_DATA2
, data
[2]);
4662 nw64(ZCP_RAM_DATA3
, data
[3]);
4663 nw64(ZCP_RAM_DATA4
, data
[4]);
4664 nw64(ZCP_RAM_BE
, ZCP_RAM_BE_VAL
);
4666 (ZCP_RAM_ACC_WRITE
|
4667 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
4668 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
4670 return niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
4674 static int niu_zcp_read(struct niu
*np
, int index
, u64
*data
)
4678 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
4681 dev_err(np
->device
, PFX
"%s: ZCP read busy won't clear, "
4682 "ZCP_RAM_ACC[%llx]\n", np
->dev
->name
,
4683 (unsigned long long) nr64(ZCP_RAM_ACC
));
4689 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
4690 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
4692 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
4695 dev_err(np
->device
, PFX
"%s: ZCP read busy2 won't clear, "
4696 "ZCP_RAM_ACC[%llx]\n", np
->dev
->name
,
4697 (unsigned long long) nr64(ZCP_RAM_ACC
));
4701 data
[0] = nr64(ZCP_RAM_DATA0
);
4702 data
[1] = nr64(ZCP_RAM_DATA1
);
4703 data
[2] = nr64(ZCP_RAM_DATA2
);
4704 data
[3] = nr64(ZCP_RAM_DATA3
);
4705 data
[4] = nr64(ZCP_RAM_DATA4
);
4710 static void niu_zcp_cfifo_reset(struct niu
*np
)
4712 u64 val
= nr64(RESET_CFIFO
);
4714 val
|= RESET_CFIFO_RST(np
->port
);
4715 nw64(RESET_CFIFO
, val
);
4718 val
&= ~RESET_CFIFO_RST(np
->port
);
4719 nw64(RESET_CFIFO
, val
);
4722 static int niu_init_zcp(struct niu
*np
)
4724 u64 data
[5], rbuf
[5];
4727 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
4728 if (np
->port
== 0 || np
->port
== 1)
4729 max
= ATLAS_P0_P1_CFIFO_ENTRIES
;
4731 max
= ATLAS_P2_P3_CFIFO_ENTRIES
;
4733 max
= NIU_CFIFO_ENTRIES
;
4741 for (i
= 0; i
< max
; i
++) {
4742 err
= niu_zcp_write(np
, i
, data
);
4745 err
= niu_zcp_read(np
, i
, rbuf
);
4750 niu_zcp_cfifo_reset(np
);
4751 nw64(CFIFO_ECC(np
->port
), 0);
4752 nw64(ZCP_INT_STAT
, ZCP_INT_STAT_ALL
);
4753 (void) nr64(ZCP_INT_STAT
);
4754 nw64(ZCP_INT_MASK
, ZCP_INT_MASK_ALL
);
4759 static void niu_ipp_write(struct niu
*np
, int index
, u64
*data
)
4761 u64 val
= nr64_ipp(IPP_CFIG
);
4763 nw64_ipp(IPP_CFIG
, val
| IPP_CFIG_DFIFO_PIO_W
);
4764 nw64_ipp(IPP_DFIFO_WR_PTR
, index
);
4765 nw64_ipp(IPP_DFIFO_WR0
, data
[0]);
4766 nw64_ipp(IPP_DFIFO_WR1
, data
[1]);
4767 nw64_ipp(IPP_DFIFO_WR2
, data
[2]);
4768 nw64_ipp(IPP_DFIFO_WR3
, data
[3]);
4769 nw64_ipp(IPP_DFIFO_WR4
, data
[4]);
4770 nw64_ipp(IPP_CFIG
, val
& ~IPP_CFIG_DFIFO_PIO_W
);
4773 static void niu_ipp_read(struct niu
*np
, int index
, u64
*data
)
4775 nw64_ipp(IPP_DFIFO_RD_PTR
, index
);
4776 data
[0] = nr64_ipp(IPP_DFIFO_RD0
);
4777 data
[1] = nr64_ipp(IPP_DFIFO_RD1
);
4778 data
[2] = nr64_ipp(IPP_DFIFO_RD2
);
4779 data
[3] = nr64_ipp(IPP_DFIFO_RD3
);
4780 data
[4] = nr64_ipp(IPP_DFIFO_RD4
);
4783 static int niu_ipp_reset(struct niu
*np
)
4785 return niu_set_and_wait_clear_ipp(np
, IPP_CFIG
, IPP_CFIG_SOFT_RST
,
4786 1000, 100, "IPP_CFIG");
4789 static int niu_init_ipp(struct niu
*np
)
4791 u64 data
[5], rbuf
[5], val
;
4794 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
4795 if (np
->port
== 0 || np
->port
== 1)
4796 max
= ATLAS_P0_P1_DFIFO_ENTRIES
;
4798 max
= ATLAS_P2_P3_DFIFO_ENTRIES
;
4800 max
= NIU_DFIFO_ENTRIES
;
4808 for (i
= 0; i
< max
; i
++) {
4809 niu_ipp_write(np
, i
, data
);
4810 niu_ipp_read(np
, i
, rbuf
);
4813 (void) nr64_ipp(IPP_INT_STAT
);
4814 (void) nr64_ipp(IPP_INT_STAT
);
4816 err
= niu_ipp_reset(np
);
4820 (void) nr64_ipp(IPP_PKT_DIS
);
4821 (void) nr64_ipp(IPP_BAD_CS_CNT
);
4822 (void) nr64_ipp(IPP_ECC
);
4824 (void) nr64_ipp(IPP_INT_STAT
);
4826 nw64_ipp(IPP_MSK
, ~IPP_MSK_ALL
);
4828 val
= nr64_ipp(IPP_CFIG
);
4829 val
&= ~IPP_CFIG_IP_MAX_PKT
;
4830 val
|= (IPP_CFIG_IPP_ENABLE
|
4831 IPP_CFIG_DFIFO_ECC_EN
|
4832 IPP_CFIG_DROP_BAD_CRC
|
4834 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT
));
4835 nw64_ipp(IPP_CFIG
, val
);
4840 static void niu_handle_led(struct niu
*np
, int status
)
4843 val
= nr64_mac(XMAC_CONFIG
);
4845 if ((np
->flags
& NIU_FLAGS_10G
) != 0 &&
4846 (np
->flags
& NIU_FLAGS_FIBER
) != 0) {
4848 val
|= XMAC_CONFIG_LED_POLARITY
;
4849 val
&= ~XMAC_CONFIG_FORCE_LED_ON
;
4851 val
|= XMAC_CONFIG_FORCE_LED_ON
;
4852 val
&= ~XMAC_CONFIG_LED_POLARITY
;
4856 nw64_mac(XMAC_CONFIG
, val
);
4859 static void niu_init_xif_xmac(struct niu
*np
)
4861 struct niu_link_config
*lp
= &np
->link_config
;
4864 if (np
->flags
& NIU_FLAGS_XCVR_SERDES
) {
4865 val
= nr64(MIF_CONFIG
);
4866 val
|= MIF_CONFIG_ATCA_GE
;
4867 nw64(MIF_CONFIG
, val
);
4870 val
= nr64_mac(XMAC_CONFIG
);
4871 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
4873 val
|= XMAC_CONFIG_TX_OUTPUT_EN
;
4875 if (lp
->loopback_mode
== LOOPBACK_MAC
) {
4876 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
4877 val
|= XMAC_CONFIG_LOOPBACK
;
4879 val
&= ~XMAC_CONFIG_LOOPBACK
;
4882 if (np
->flags
& NIU_FLAGS_10G
) {
4883 val
&= ~XMAC_CONFIG_LFS_DISABLE
;
4885 val
|= XMAC_CONFIG_LFS_DISABLE
;
4886 if (!(np
->flags
& NIU_FLAGS_FIBER
) &&
4887 !(np
->flags
& NIU_FLAGS_XCVR_SERDES
))
4888 val
|= XMAC_CONFIG_1G_PCS_BYPASS
;
4890 val
&= ~XMAC_CONFIG_1G_PCS_BYPASS
;
4893 val
&= ~XMAC_CONFIG_10G_XPCS_BYPASS
;
4895 if (lp
->active_speed
== SPEED_100
)
4896 val
|= XMAC_CONFIG_SEL_CLK_25MHZ
;
4898 val
&= ~XMAC_CONFIG_SEL_CLK_25MHZ
;
4900 nw64_mac(XMAC_CONFIG
, val
);
4902 val
= nr64_mac(XMAC_CONFIG
);
4903 val
&= ~XMAC_CONFIG_MODE_MASK
;
4904 if (np
->flags
& NIU_FLAGS_10G
) {
4905 val
|= XMAC_CONFIG_MODE_XGMII
;
4907 if (lp
->active_speed
== SPEED_100
)
4908 val
|= XMAC_CONFIG_MODE_MII
;
4910 val
|= XMAC_CONFIG_MODE_GMII
;
4913 nw64_mac(XMAC_CONFIG
, val
);
4916 static void niu_init_xif_bmac(struct niu
*np
)
4918 struct niu_link_config
*lp
= &np
->link_config
;
4921 val
= BMAC_XIF_CONFIG_TX_OUTPUT_EN
;
4923 if (lp
->loopback_mode
== LOOPBACK_MAC
)
4924 val
|= BMAC_XIF_CONFIG_MII_LOOPBACK
;
4926 val
&= ~BMAC_XIF_CONFIG_MII_LOOPBACK
;
4928 if (lp
->active_speed
== SPEED_1000
)
4929 val
|= BMAC_XIF_CONFIG_GMII_MODE
;
4931 val
&= ~BMAC_XIF_CONFIG_GMII_MODE
;
4933 val
&= ~(BMAC_XIF_CONFIG_LINK_LED
|
4934 BMAC_XIF_CONFIG_LED_POLARITY
);
4936 if (!(np
->flags
& NIU_FLAGS_10G
) &&
4937 !(np
->flags
& NIU_FLAGS_FIBER
) &&
4938 lp
->active_speed
== SPEED_100
)
4939 val
|= BMAC_XIF_CONFIG_25MHZ_CLOCK
;
4941 val
&= ~BMAC_XIF_CONFIG_25MHZ_CLOCK
;
4943 nw64_mac(BMAC_XIF_CONFIG
, val
);
4946 static void niu_init_xif(struct niu
*np
)
4948 if (np
->flags
& NIU_FLAGS_XMAC
)
4949 niu_init_xif_xmac(np
);
4951 niu_init_xif_bmac(np
);
4954 static void niu_pcs_mii_reset(struct niu
*np
)
4957 u64 val
= nr64_pcs(PCS_MII_CTL
);
4958 val
|= PCS_MII_CTL_RST
;
4959 nw64_pcs(PCS_MII_CTL
, val
);
4960 while ((--limit
>= 0) && (val
& PCS_MII_CTL_RST
)) {
4962 val
= nr64_pcs(PCS_MII_CTL
);
4966 static void niu_xpcs_reset(struct niu
*np
)
4969 u64 val
= nr64_xpcs(XPCS_CONTROL1
);
4970 val
|= XPCS_CONTROL1_RESET
;
4971 nw64_xpcs(XPCS_CONTROL1
, val
);
4972 while ((--limit
>= 0) && (val
& XPCS_CONTROL1_RESET
)) {
4974 val
= nr64_xpcs(XPCS_CONTROL1
);
4978 static int niu_init_pcs(struct niu
*np
)
4980 struct niu_link_config
*lp
= &np
->link_config
;
4983 switch (np
->flags
& (NIU_FLAGS_10G
|
4985 NIU_FLAGS_XCVR_SERDES
)) {
4986 case NIU_FLAGS_FIBER
:
4988 nw64_pcs(PCS_CONF
, PCS_CONF_MASK
| PCS_CONF_ENABLE
);
4989 nw64_pcs(PCS_DPATH_MODE
, 0);
4990 niu_pcs_mii_reset(np
);
4994 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
4995 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
4997 if (!(np
->flags
& NIU_FLAGS_XMAC
))
5000 /* 10G copper or fiber */
5001 val
= nr64_mac(XMAC_CONFIG
);
5002 val
&= ~XMAC_CONFIG_10G_XPCS_BYPASS
;
5003 nw64_mac(XMAC_CONFIG
, val
);
5007 val
= nr64_xpcs(XPCS_CONTROL1
);
5008 if (lp
->loopback_mode
== LOOPBACK_PHY
)
5009 val
|= XPCS_CONTROL1_LOOPBACK
;
5011 val
&= ~XPCS_CONTROL1_LOOPBACK
;
5012 nw64_xpcs(XPCS_CONTROL1
, val
);
5014 nw64_xpcs(XPCS_DESKEW_ERR_CNT
, 0);
5015 (void) nr64_xpcs(XPCS_SYMERR_CNT01
);
5016 (void) nr64_xpcs(XPCS_SYMERR_CNT23
);
5020 case NIU_FLAGS_XCVR_SERDES
:
5022 niu_pcs_mii_reset(np
);
5023 nw64_pcs(PCS_CONF
, PCS_CONF_MASK
| PCS_CONF_ENABLE
);
5024 nw64_pcs(PCS_DPATH_MODE
, 0);
5029 case NIU_FLAGS_XCVR_SERDES
| NIU_FLAGS_FIBER
:
5030 /* 1G RGMII FIBER */
5031 nw64_pcs(PCS_DPATH_MODE
, PCS_DPATH_MODE_MII
);
5032 niu_pcs_mii_reset(np
);
5042 static int niu_reset_tx_xmac(struct niu
*np
)
5044 return niu_set_and_wait_clear_mac(np
, XTXMAC_SW_RST
,
5045 (XTXMAC_SW_RST_REG_RS
|
5046 XTXMAC_SW_RST_SOFT_RST
),
5047 1000, 100, "XTXMAC_SW_RST");
5050 static int niu_reset_tx_bmac(struct niu
*np
)
5054 nw64_mac(BTXMAC_SW_RST
, BTXMAC_SW_RST_RESET
);
5056 while (--limit
>= 0) {
5057 if (!(nr64_mac(BTXMAC_SW_RST
) & BTXMAC_SW_RST_RESET
))
5062 dev_err(np
->device
, PFX
"Port %u TX BMAC would not reset, "
5063 "BTXMAC_SW_RST[%llx]\n",
5065 (unsigned long long) nr64_mac(BTXMAC_SW_RST
));
5072 static int niu_reset_tx_mac(struct niu
*np
)
5074 if (np
->flags
& NIU_FLAGS_XMAC
)
5075 return niu_reset_tx_xmac(np
);
5077 return niu_reset_tx_bmac(np
);
5080 static void niu_init_tx_xmac(struct niu
*np
, u64 min
, u64 max
)
5084 val
= nr64_mac(XMAC_MIN
);
5085 val
&= ~(XMAC_MIN_TX_MIN_PKT_SIZE
|
5086 XMAC_MIN_RX_MIN_PKT_SIZE
);
5087 val
|= (min
<< XMAC_MIN_RX_MIN_PKT_SIZE_SHFT
);
5088 val
|= (min
<< XMAC_MIN_TX_MIN_PKT_SIZE_SHFT
);
5089 nw64_mac(XMAC_MIN
, val
);
5091 nw64_mac(XMAC_MAX
, max
);
5093 nw64_mac(XTXMAC_STAT_MSK
, ~(u64
)0);
5095 val
= nr64_mac(XMAC_IPG
);
5096 if (np
->flags
& NIU_FLAGS_10G
) {
5097 val
&= ~XMAC_IPG_IPG_XGMII
;
5098 val
|= (IPG_12_15_XGMII
<< XMAC_IPG_IPG_XGMII_SHIFT
);
5100 val
&= ~XMAC_IPG_IPG_MII_GMII
;
5101 val
|= (IPG_12_MII_GMII
<< XMAC_IPG_IPG_MII_GMII_SHIFT
);
5103 nw64_mac(XMAC_IPG
, val
);
5105 val
= nr64_mac(XMAC_CONFIG
);
5106 val
&= ~(XMAC_CONFIG_ALWAYS_NO_CRC
|
5107 XMAC_CONFIG_STRETCH_MODE
|
5108 XMAC_CONFIG_VAR_MIN_IPG_EN
|
5109 XMAC_CONFIG_TX_ENABLE
);
5110 nw64_mac(XMAC_CONFIG
, val
);
5112 nw64_mac(TXMAC_FRM_CNT
, 0);
5113 nw64_mac(TXMAC_BYTE_CNT
, 0);
5116 static void niu_init_tx_bmac(struct niu
*np
, u64 min
, u64 max
)
5120 nw64_mac(BMAC_MIN_FRAME
, min
);
5121 nw64_mac(BMAC_MAX_FRAME
, max
);
5123 nw64_mac(BTXMAC_STATUS_MASK
, ~(u64
)0);
5124 nw64_mac(BMAC_CTRL_TYPE
, 0x8808);
5125 nw64_mac(BMAC_PREAMBLE_SIZE
, 7);
5127 val
= nr64_mac(BTXMAC_CONFIG
);
5128 val
&= ~(BTXMAC_CONFIG_FCS_DISABLE
|
5129 BTXMAC_CONFIG_ENABLE
);
5130 nw64_mac(BTXMAC_CONFIG
, val
);
5133 static void niu_init_tx_mac(struct niu
*np
)
5138 if (np
->dev
->mtu
> ETH_DATA_LEN
)
5143 /* The XMAC_MIN register only accepts values for TX min which
5144 * have the low 3 bits cleared.
5146 BUILD_BUG_ON(min
& 0x7);
5148 if (np
->flags
& NIU_FLAGS_XMAC
)
5149 niu_init_tx_xmac(np
, min
, max
);
5151 niu_init_tx_bmac(np
, min
, max
);
5154 static int niu_reset_rx_xmac(struct niu
*np
)
5158 nw64_mac(XRXMAC_SW_RST
,
5159 XRXMAC_SW_RST_REG_RS
| XRXMAC_SW_RST_SOFT_RST
);
5161 while (--limit
>= 0) {
5162 if (!(nr64_mac(XRXMAC_SW_RST
) & (XRXMAC_SW_RST_REG_RS
|
5163 XRXMAC_SW_RST_SOFT_RST
)))
5168 dev_err(np
->device
, PFX
"Port %u RX XMAC would not reset, "
5169 "XRXMAC_SW_RST[%llx]\n",
5171 (unsigned long long) nr64_mac(XRXMAC_SW_RST
));
5178 static int niu_reset_rx_bmac(struct niu
*np
)
5182 nw64_mac(BRXMAC_SW_RST
, BRXMAC_SW_RST_RESET
);
5184 while (--limit
>= 0) {
5185 if (!(nr64_mac(BRXMAC_SW_RST
) & BRXMAC_SW_RST_RESET
))
5190 dev_err(np
->device
, PFX
"Port %u RX BMAC would not reset, "
5191 "BRXMAC_SW_RST[%llx]\n",
5193 (unsigned long long) nr64_mac(BRXMAC_SW_RST
));
5200 static int niu_reset_rx_mac(struct niu
*np
)
5202 if (np
->flags
& NIU_FLAGS_XMAC
)
5203 return niu_reset_rx_xmac(np
);
5205 return niu_reset_rx_bmac(np
);
5208 static void niu_init_rx_xmac(struct niu
*np
)
5210 struct niu_parent
*parent
= np
->parent
;
5211 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[np
->port
];
5212 int first_rdc_table
= tp
->first_table_num
;
5216 nw64_mac(XMAC_ADD_FILT0
, 0);
5217 nw64_mac(XMAC_ADD_FILT1
, 0);
5218 nw64_mac(XMAC_ADD_FILT2
, 0);
5219 nw64_mac(XMAC_ADD_FILT12_MASK
, 0);
5220 nw64_mac(XMAC_ADD_FILT00_MASK
, 0);
5221 for (i
= 0; i
< MAC_NUM_HASH
; i
++)
5222 nw64_mac(XMAC_HASH_TBL(i
), 0);
5223 nw64_mac(XRXMAC_STAT_MSK
, ~(u64
)0);
5224 niu_set_primary_mac_rdc_table(np
, first_rdc_table
, 1);
5225 niu_set_multicast_mac_rdc_table(np
, first_rdc_table
, 1);
5227 val
= nr64_mac(XMAC_CONFIG
);
5228 val
&= ~(XMAC_CONFIG_RX_MAC_ENABLE
|
5229 XMAC_CONFIG_PROMISCUOUS
|
5230 XMAC_CONFIG_PROMISC_GROUP
|
5231 XMAC_CONFIG_ERR_CHK_DIS
|
5232 XMAC_CONFIG_RX_CRC_CHK_DIS
|
5233 XMAC_CONFIG_RESERVED_MULTICAST
|
5234 XMAC_CONFIG_RX_CODEV_CHK_DIS
|
5235 XMAC_CONFIG_ADDR_FILTER_EN
|
5236 XMAC_CONFIG_RCV_PAUSE_ENABLE
|
5237 XMAC_CONFIG_STRIP_CRC
|
5238 XMAC_CONFIG_PASS_FLOW_CTRL
|
5239 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN
);
5240 val
|= (XMAC_CONFIG_HASH_FILTER_EN
);
5241 nw64_mac(XMAC_CONFIG
, val
);
5243 nw64_mac(RXMAC_BT_CNT
, 0);
5244 nw64_mac(RXMAC_BC_FRM_CNT
, 0);
5245 nw64_mac(RXMAC_MC_FRM_CNT
, 0);
5246 nw64_mac(RXMAC_FRAG_CNT
, 0);
5247 nw64_mac(RXMAC_HIST_CNT1
, 0);
5248 nw64_mac(RXMAC_HIST_CNT2
, 0);
5249 nw64_mac(RXMAC_HIST_CNT3
, 0);
5250 nw64_mac(RXMAC_HIST_CNT4
, 0);
5251 nw64_mac(RXMAC_HIST_CNT5
, 0);
5252 nw64_mac(RXMAC_HIST_CNT6
, 0);
5253 nw64_mac(RXMAC_HIST_CNT7
, 0);
5254 nw64_mac(RXMAC_MPSZER_CNT
, 0);
5255 nw64_mac(RXMAC_CRC_ER_CNT
, 0);
5256 nw64_mac(RXMAC_CD_VIO_CNT
, 0);
5257 nw64_mac(LINK_FAULT_CNT
, 0);
5260 static void niu_init_rx_bmac(struct niu
*np
)
5262 struct niu_parent
*parent
= np
->parent
;
5263 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[np
->port
];
5264 int first_rdc_table
= tp
->first_table_num
;
5268 nw64_mac(BMAC_ADD_FILT0
, 0);
5269 nw64_mac(BMAC_ADD_FILT1
, 0);
5270 nw64_mac(BMAC_ADD_FILT2
, 0);
5271 nw64_mac(BMAC_ADD_FILT12_MASK
, 0);
5272 nw64_mac(BMAC_ADD_FILT00_MASK
, 0);
5273 for (i
= 0; i
< MAC_NUM_HASH
; i
++)
5274 nw64_mac(BMAC_HASH_TBL(i
), 0);
5275 niu_set_primary_mac_rdc_table(np
, first_rdc_table
, 1);
5276 niu_set_multicast_mac_rdc_table(np
, first_rdc_table
, 1);
5277 nw64_mac(BRXMAC_STATUS_MASK
, ~(u64
)0);
5279 val
= nr64_mac(BRXMAC_CONFIG
);
5280 val
&= ~(BRXMAC_CONFIG_ENABLE
|
5281 BRXMAC_CONFIG_STRIP_PAD
|
5282 BRXMAC_CONFIG_STRIP_FCS
|
5283 BRXMAC_CONFIG_PROMISC
|
5284 BRXMAC_CONFIG_PROMISC_GRP
|
5285 BRXMAC_CONFIG_ADDR_FILT_EN
|
5286 BRXMAC_CONFIG_DISCARD_DIS
);
5287 val
|= (BRXMAC_CONFIG_HASH_FILT_EN
);
5288 nw64_mac(BRXMAC_CONFIG
, val
);
5290 val
= nr64_mac(BMAC_ADDR_CMPEN
);
5291 val
|= BMAC_ADDR_CMPEN_EN0
;
5292 nw64_mac(BMAC_ADDR_CMPEN
, val
);
5295 static void niu_init_rx_mac(struct niu
*np
)
5297 niu_set_primary_mac(np
, np
->dev
->dev_addr
);
5299 if (np
->flags
& NIU_FLAGS_XMAC
)
5300 niu_init_rx_xmac(np
);
5302 niu_init_rx_bmac(np
);
static void niu_enable_tx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	if (on)
		val |= XMAC_CONFIG_TX_ENABLE;
	else
		val &= ~XMAC_CONFIG_TX_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_tx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BTXMAC_CONFIG);

	if (on)
		val |= BTXMAC_CONFIG_ENABLE;
	else
		val &= ~BTXMAC_CONFIG_ENABLE;
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_enable_tx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_tx_xmac(np, on);
	else
		niu_enable_tx_bmac(np, on);
}

static void niu_enable_rx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
		 XMAC_CONFIG_PROMISCUOUS);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= XMAC_CONFIG_HASH_FILTER_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= XMAC_CONFIG_PROMISCUOUS;

	if (on)
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
	else
		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_rx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BRXMAC_CONFIG);

	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
		 BRXMAC_CONFIG_PROMISC);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= BRXMAC_CONFIG_HASH_FILT_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= BRXMAC_CONFIG_PROMISC;

	if (on)
		val |= BRXMAC_CONFIG_ENABLE;
	else
		val &= ~BRXMAC_CONFIG_ENABLE;
	nw64_mac(BRXMAC_CONFIG, val);
}

static void niu_enable_rx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_rx_xmac(np, on);
	else
		niu_enable_rx_bmac(np, on);
}
5381 static int niu_init_mac(struct niu
*np
)
5386 err
= niu_init_pcs(np
);
5390 err
= niu_reset_tx_mac(np
);
5393 niu_init_tx_mac(np
);
5394 err
= niu_reset_rx_mac(np
);
5397 niu_init_rx_mac(np
);
	/* This looks hokey, but the RX MAC reset we just did will
	 * undo some of the state we set up in niu_init_tx_mac() so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to its default value.
	 */
	niu_init_tx_mac(np);
5405 niu_enable_tx_mac(np
, 1);
5407 niu_enable_rx_mac(np
, 1);
static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}

static void niu_stop_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_stop_one_tx_channel(np, rp);
	}
}

static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}

static void niu_reset_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_reset_one_tx_channel(np, rp);
	}
}

static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
}

static void niu_stop_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_stop_one_rx_channel(np, rp);
	}
}

static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}

static void niu_reset_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_reset_one_rx_channel(np, rp);
	}
}
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		dev_err(np->device, PFX "%s: IPP would not quiesce, "
			"rd_ptr[%llx] wr_ptr[%llx]\n",
			np->dev->name,
			(unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
			(unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}
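
/* Bring the whole datapath up in dependency order: TXC, TX channels,
 * RX channels, classifier, ZCP, IPP and finally the MAC.  The error
 * labels below unwind in the reverse order, so a failure at any stage
 * leaves the earlier blocks stopped and reset.
 */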
static int niu_init_hw(struct niu *np)
{
	int i, err;

	niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
	niu_disable_ipp(np);

out_uninit_rx_channels:
	niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}
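
/* Tear-down mirror of niu_init_hw(): mask interrupts, disable the RX
 * MAC and the IPP first so no new traffic flows, then stop and reset
 * the TX and RX DMA channels.
 */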
static void niu_stop_hw(struct niu *np)
{
	niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
	niu_enable_interrupts(np, 0);

	niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
	niu_enable_rx_mac(np, 0);

	niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
	niu_disable_ipp(np);

	niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);

	niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);

	niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
	niu_reset_tx_channels(np);

	niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
	niu_reset_rx_channels(np);
}
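
/* One interrupt is requested per logical device group (LDG).  If any
 * request_irq() fails, the vectors already acquired are released before
 * the error is returned.
 */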
static int niu_request_irq(struct niu *np)
{
	int i, j, err;

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt,
				  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
				  np->dev->name, lp);
		if (err)
			goto out_free_irqs;
	}

	return 0;

out_free_irqs:
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
	return err;
}

static void niu_free_irq(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		free_irq(lp->irq, lp);
	}
}
static void niu_enable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_enable(&np->ldg[i].napi);
}

static void niu_disable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_disable(&np->ldg[i].napi);
}
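
/* Open path: allocate the TX/RX channel state, hook up the LDG
 * interrupts, then program the hardware under np->lock.  Interrupts
 * are only unmasked once niu_init_hw() has succeeded and the niu_timer
 * (first expiry one HZ from now) has been set up.
 */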
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}
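
/* Full shutdown is shared by niu_close() and the MTU-change path
 * further down: cancel the reset work, quiesce NAPI and the TX queues,
 * kill the timer and stop the hardware under np->lock.
 */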
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}

static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_irq(np);

	niu_free_channels(np);

	niu_handle_led(np, 0);

	return 0;
}
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}

static void niu_sync_bmac_stats(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;

	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);

	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
}

static void niu_sync_mac_stats(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_sync_xmac_stats(np);
	else
		niu_sync_bmac_stats(np);
}
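
/* The MAC counter registers above are accumulated into the software
 * mac_stats each time they are read; the per-ring packet/byte/error
 * counts below are simply summed into net_stats on demand.
 */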
static void niu_get_rx_stats(struct niu *np)
{
	unsigned long pkts, dropped, errors, bytes;
	int i;

	pkts = dropped = errors = bytes = 0;
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		pkts += rp->rx_packets;
		bytes += rp->rx_bytes;
		dropped += rp->rx_dropped;
		errors += rp->rx_errors;
	}
	np->net_stats.rx_packets = pkts;
	np->net_stats.rx_bytes = bytes;
	np->net_stats.rx_dropped = dropped;
	np->net_stats.rx_errors = errors;
}

static void niu_get_tx_stats(struct niu *np)
{
	unsigned long pkts, errors, bytes;
	int i;

	pkts = errors = bytes = 0;
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		pkts += rp->tx_packets;
		bytes += rp->tx_bytes;
		errors += rp->tx_errors;
	}
	np->net_stats.tx_packets = pkts;
	np->net_stats.tx_bytes = bytes;
	np->net_stats.tx_errors = errors;
}

static struct net_device_stats *niu_get_stats(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_get_rx_stats(np);
	niu_get_tx_stats(np);

	return &np->net_stats;
}
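
/* For reference, a minimal sketch (not taken from this file) of how
 * hooks such as niu_open, niu_close, niu_get_stats and niu_set_rx_mode
 * are typically installed on a kernel of this vintage, where they are
 * assigned directly to struct net_device rather than to a
 * net_device_ops table; the actual assignments are made elsewhere in
 * the driver, outside this section:
 *
 *	dev->open		= niu_open;
 *	dev->stop		= niu_close;
 *	dev->get_stats		= niu_get_stats;
 *	dev->set_multicast_list	= niu_set_rx_mode;
 *	dev->set_mac_address	= niu_set_mac_addr;
 *	dev->do_ioctl		= niu_ioctl;
 *	dev->tx_timeout		= niu_tx_timeout;
 *	dev->hard_start_xmit	= niu_start_xmit;
 *	dev->ethtool_ops	= &niu_ethtool_ops;
 *	dev->watchdog_timeo	= NIU_TX_TIMEOUT;
 */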
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash_bmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash(struct niu *np, u16 *hash)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_load_hash_xmac(np, hash);
	else
		niu_load_hash_bmac(np, hash);
}
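
/* The XMAC/BMAC hash table above is 16 registers of 16 bits, i.e. a
 * 256-bit multicast hash filter.  niu_set_rx_mode() below fills it by
 * hashing each multicast address with ether_crc_le() and setting one
 * bit per address, or loads all-ones for IFF_ALLMULTI.
 */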
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct dev_addr_list *addr;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
		np->flags |= NIU_FLAGS_MCAST;

	alt_cnt = dev->uc_count;
	if (alt_cnt > niu_num_alt_addr(np)) {
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		for (addr = dev->uc_list; addr; addr = addr->next) {
			err = niu_set_alt_mac(np, index,
					      addr->da_addr);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "adding alt mac %d\n",
				       dev->name, err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "enabling alt mac %d\n",
				       dev->name, err, index);

			index++;
		}
	} else {
		int alt_start;

		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "disabling alt mac %d\n",
				       dev->name, err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (dev->mc_count > 0) {
		for (addr = dev->mc_list; addr; addr = addr->next) {
			u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);

			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

static void niu_netif_stop(struct niu *np)
{
	np->dev->trans_start = jiffies;	/* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}

static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}
5986 static void niu_reset_buffers(struct niu
*np
)
5991 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
5992 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
5994 for (j
= 0, k
= 0; j
< MAX_RBR_RING_SIZE
; j
++) {
5997 page
= rp
->rxhash
[j
];
6000 (struct page
*) page
->mapping
;
6001 u64 base
= page
->index
;
6002 base
= base
>> RBR_DESCR_ADDR_SHIFT
;
6003 rp
->rbr
[k
++] = cpu_to_le32(base
);
6007 for (; k
< MAX_RBR_RING_SIZE
; k
++) {
6008 err
= niu_rbr_add_page(np
, rp
, GFP_ATOMIC
, k
);
6013 rp
->rbr_index
= rp
->rbr_table_size
- 1;
6015 rp
->rbr_pending
= 0;
6016 rp
->rbr_refill_pending
= 0;
6020 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
6021 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
6023 for (j
= 0; j
< MAX_TX_RING_SIZE
; j
++) {
6024 if (rp
->tx_buffs
[j
].skb
)
6025 (void) release_tx_packet(np
, rp
, j
);
6028 rp
->pending
= MAX_TX_RING_SIZE
;
6036 static void niu_reset_task(struct work_struct
*work
)
6038 struct niu
*np
= container_of(work
, struct niu
, reset_task
);
6039 unsigned long flags
;
6042 spin_lock_irqsave(&np
->lock
, flags
);
6043 if (!netif_running(np
->dev
)) {
6044 spin_unlock_irqrestore(&np
->lock
, flags
);
6048 spin_unlock_irqrestore(&np
->lock
, flags
);
6050 del_timer_sync(&np
->timer
);
6054 spin_lock_irqsave(&np
->lock
, flags
);
6058 spin_unlock_irqrestore(&np
->lock
, flags
);
6060 niu_reset_buffers(np
);
6062 spin_lock_irqsave(&np
->lock
, flags
);
6064 err
= niu_init_hw(np
);
6066 np
->timer
.expires
= jiffies
+ HZ
;
6067 add_timer(&np
->timer
);
6068 niu_netif_start(np
);
6071 spin_unlock_irqrestore(&np
->lock
, flags
);
6074 static void niu_tx_timeout(struct net_device
*dev
)
6076 struct niu
*np
= netdev_priv(dev
);
6078 dev_err(np
->device
, PFX
"%s: Transmit timed out, resetting\n",
6081 schedule_work(&np
->reset_task
);
6084 static void niu_set_txd(struct tx_ring_info
*rp
, int index
,
6085 u64 mapping
, u64 len
, u64 mark
,
6088 __le64
*desc
= &rp
->descr
[index
];
6090 *desc
= cpu_to_le64(mark
|
6091 (n_frags
<< TX_DESC_NUM_PTR_SHIFT
) |
6092 (len
<< TX_DESC_TR_LEN_SHIFT
) |
6093 (mapping
& TX_DESC_SAD
));
6096 static u64
niu_compute_tx_flags(struct sk_buff
*skb
, struct ethhdr
*ehdr
,
6097 u64 pad_bytes
, u64 len
)
6099 u16 eth_proto
, eth_proto_inner
;
6100 u64 csum_bits
, l3off
, ihl
, ret
;
6104 eth_proto
= be16_to_cpu(ehdr
->h_proto
);
6105 eth_proto_inner
= eth_proto
;
6106 if (eth_proto
== ETH_P_8021Q
) {
6107 struct vlan_ethhdr
*vp
= (struct vlan_ethhdr
*) ehdr
;
6108 __be16 val
= vp
->h_vlan_encapsulated_proto
;
6110 eth_proto_inner
= be16_to_cpu(val
);
6114 switch (skb
->protocol
) {
6115 case __constant_htons(ETH_P_IP
):
6116 ip_proto
= ip_hdr(skb
)->protocol
;
6117 ihl
= ip_hdr(skb
)->ihl
;
6119 case __constant_htons(ETH_P_IPV6
):
6120 ip_proto
= ipv6_hdr(skb
)->nexthdr
;
6129 csum_bits
= TXHDR_CSUM_NONE
;
6130 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
6133 csum_bits
= (ip_proto
== IPPROTO_TCP
?
6135 (ip_proto
== IPPROTO_UDP
?
6136 TXHDR_CSUM_UDP
: TXHDR_CSUM_SCTP
));
6138 start
= skb_transport_offset(skb
) -
6139 (pad_bytes
+ sizeof(struct tx_pkt_hdr
));
6140 stuff
= start
+ skb
->csum_offset
;
6142 csum_bits
|= (start
/ 2) << TXHDR_L4START_SHIFT
;
6143 csum_bits
|= (stuff
/ 2) << TXHDR_L4STUFF_SHIFT
;
6146 l3off
= skb_network_offset(skb
) -
6147 (pad_bytes
+ sizeof(struct tx_pkt_hdr
));
6149 ret
= (((pad_bytes
/ 2) << TXHDR_PAD_SHIFT
) |
6150 (len
<< TXHDR_LEN_SHIFT
) |
6151 ((l3off
/ 2) << TXHDR_L3START_SHIFT
) |
6152 (ihl
<< TXHDR_IHL_SHIFT
) |
6153 ((eth_proto_inner
< 1536) ? TXHDR_LLC
: 0) |
6154 ((eth_proto
== ETH_P_8021Q
) ? TXHDR_VLAN
: 0) |
6155 (ipv6
? TXHDR_IP_VER
: 0) |
6161 static int niu_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
6163 struct niu
*np
= netdev_priv(dev
);
6164 unsigned long align
, headroom
;
6165 struct netdev_queue
*txq
;
6166 struct tx_ring_info
*rp
;
6167 struct tx_pkt_hdr
*tp
;
6168 unsigned int len
, nfg
;
6169 struct ethhdr
*ehdr
;
6173 i
= skb_get_queue_mapping(skb
);
6174 rp
= &np
->tx_rings
[i
];
6175 txq
= netdev_get_tx_queue(dev
, i
);
6177 if (niu_tx_avail(rp
) <= (skb_shinfo(skb
)->nr_frags
+ 1)) {
6178 netif_tx_stop_queue(txq
);
6179 dev_err(np
->device
, PFX
"%s: BUG! Tx ring full when "
6180 "queue awake!\n", dev
->name
);
6182 return NETDEV_TX_BUSY
;
6185 if (skb
->len
< ETH_ZLEN
) {
6186 unsigned int pad_bytes
= ETH_ZLEN
- skb
->len
;
6188 if (skb_pad(skb
, pad_bytes
))
6190 skb_put(skb
, pad_bytes
);
6193 len
= sizeof(struct tx_pkt_hdr
) + 15;
6194 if (skb_headroom(skb
) < len
) {
6195 struct sk_buff
*skb_new
;
6197 skb_new
= skb_realloc_headroom(skb
, len
);
6207 align
= ((unsigned long) skb
->data
& (16 - 1));
6208 headroom
= align
+ sizeof(struct tx_pkt_hdr
);
6210 ehdr
= (struct ethhdr
*) skb
->data
;
6211 tp
= (struct tx_pkt_hdr
*) skb_push(skb
, headroom
);
6213 len
= skb
->len
- sizeof(struct tx_pkt_hdr
);
6214 tp
->flags
= cpu_to_le64(niu_compute_tx_flags(skb
, ehdr
, align
, len
));
6217 len
= skb_headlen(skb
);
6218 mapping
= np
->ops
->map_single(np
->device
, skb
->data
,
6219 len
, DMA_TO_DEVICE
);
6223 rp
->tx_buffs
[prod
].skb
= skb
;
6224 rp
->tx_buffs
[prod
].mapping
= mapping
;
6227 if (++rp
->mark_counter
== rp
->mark_freq
) {
6228 rp
->mark_counter
= 0;
6229 mrk
|= TX_DESC_MARK
;
6234 nfg
= skb_shinfo(skb
)->nr_frags
;
6236 tlen
-= MAX_TX_DESC_LEN
;
6241 unsigned int this_len
= len
;
6243 if (this_len
> MAX_TX_DESC_LEN
)
6244 this_len
= MAX_TX_DESC_LEN
;
6246 niu_set_txd(rp
, prod
, mapping
, this_len
, mrk
, nfg
);
6249 prod
= NEXT_TX(rp
, prod
);
6250 mapping
+= this_len
;
6254 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
6255 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6258 mapping
= np
->ops
->map_page(np
->device
, frag
->page
,
6259 frag
->page_offset
, len
,
6262 rp
->tx_buffs
[prod
].skb
= NULL
;
6263 rp
->tx_buffs
[prod
].mapping
= mapping
;
6265 niu_set_txd(rp
, prod
, mapping
, len
, 0, 0);
6267 prod
= NEXT_TX(rp
, prod
);
6270 if (prod
< rp
->prod
)
6271 rp
->wrap_bit
^= TX_RING_KICK_WRAP
;
6274 nw64(TX_RING_KICK(rp
->tx_channel
), rp
->wrap_bit
| (prod
<< 3));
6276 if (unlikely(niu_tx_avail(rp
) <= (MAX_SKB_FRAGS
+ 1))) {
6277 netif_tx_stop_queue(txq
);
6278 if (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
))
6279 netif_tx_wake_queue(txq
);
6282 dev
->trans_start
= jiffies
;
6285 return NETDEV_TX_OK
;
6293 static int niu_change_mtu(struct net_device
*dev
, int new_mtu
)
6295 struct niu
*np
= netdev_priv(dev
);
6296 int err
, orig_jumbo
, new_jumbo
;
6298 if (new_mtu
< 68 || new_mtu
> NIU_MAX_MTU
)
6301 orig_jumbo
= (dev
->mtu
> ETH_DATA_LEN
);
6302 new_jumbo
= (new_mtu
> ETH_DATA_LEN
);
6306 if (!netif_running(dev
) ||
6307 (orig_jumbo
== new_jumbo
))
6310 niu_full_shutdown(np
, dev
);
6312 niu_free_channels(np
);
6314 niu_enable_napi(np
);
6316 err
= niu_alloc_channels(np
);
6320 spin_lock_irq(&np
->lock
);
6322 err
= niu_init_hw(np
);
6324 init_timer(&np
->timer
);
6325 np
->timer
.expires
= jiffies
+ HZ
;
6326 np
->timer
.data
= (unsigned long) np
;
6327 np
->timer
.function
= niu_timer
;
6329 err
= niu_enable_interrupts(np
, 1);
6334 spin_unlock_irq(&np
->lock
);
6337 netif_tx_start_all_queues(dev
);
6338 if (np
->link_config
.loopback_mode
!= LOOPBACK_DISABLED
)
6339 netif_carrier_on(dev
);
6341 add_timer(&np
->timer
);
6347 static void niu_get_drvinfo(struct net_device
*dev
,
6348 struct ethtool_drvinfo
*info
)
6350 struct niu
*np
= netdev_priv(dev
);
6351 struct niu_vpd
*vpd
= &np
->vpd
;
6353 strcpy(info
->driver
, DRV_MODULE_NAME
);
6354 strcpy(info
->version
, DRV_MODULE_VERSION
);
6355 sprintf(info
->fw_version
, "%d.%d",
6356 vpd
->fcode_major
, vpd
->fcode_minor
);
6357 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
)
6358 strcpy(info
->bus_info
, pci_name(np
->pdev
));
6361 static int niu_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
6363 struct niu
*np
= netdev_priv(dev
);
6364 struct niu_link_config
*lp
;
6366 lp
= &np
->link_config
;
6368 memset(cmd
, 0, sizeof(*cmd
));
6369 cmd
->phy_address
= np
->phy_addr
;
6370 cmd
->supported
= lp
->supported
;
6371 cmd
->advertising
= lp
->advertising
;
6372 cmd
->autoneg
= lp
->autoneg
;
6373 cmd
->speed
= lp
->active_speed
;
6374 cmd
->duplex
= lp
->active_duplex
;
6379 static int niu_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
6384 static u32
niu_get_msglevel(struct net_device
*dev
)
6386 struct niu
*np
= netdev_priv(dev
);
6387 return np
->msg_enable
;
6390 static void niu_set_msglevel(struct net_device
*dev
, u32 value
)
6392 struct niu
*np
= netdev_priv(dev
);
6393 np
->msg_enable
= value
;
6396 static int niu_get_eeprom_len(struct net_device
*dev
)
6398 struct niu
*np
= netdev_priv(dev
);
6400 return np
->eeprom_len
;
6403 static int niu_get_eeprom(struct net_device
*dev
,
6404 struct ethtool_eeprom
*eeprom
, u8
*data
)
6406 struct niu
*np
= netdev_priv(dev
);
6407 u32 offset
, len
, val
;
6409 offset
= eeprom
->offset
;
6412 if (offset
+ len
< offset
)
6414 if (offset
>= np
->eeprom_len
)
6416 if (offset
+ len
> np
->eeprom_len
)
6417 len
= eeprom
->len
= np
->eeprom_len
- offset
;
6420 u32 b_offset
, b_count
;
6422 b_offset
= offset
& 3;
6423 b_count
= 4 - b_offset
;
6427 val
= nr64(ESPC_NCR((offset
- b_offset
) / 4));
6428 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
6434 val
= nr64(ESPC_NCR(offset
/ 4));
6435 memcpy(data
, &val
, 4);
6441 val
= nr64(ESPC_NCR(offset
/ 4));
6442 memcpy(data
, &val
, len
);
6447 static int niu_ethflow_to_class(int flow_type
, u64
*class)
6449 switch (flow_type
) {
6451 *class = CLASS_CODE_TCP_IPV4
;
6454 *class = CLASS_CODE_UDP_IPV4
;
6456 case AH_ESP_V4_FLOW
:
6457 *class = CLASS_CODE_AH_ESP_IPV4
;
6460 *class = CLASS_CODE_SCTP_IPV4
;
6463 *class = CLASS_CODE_TCP_IPV6
;
6466 *class = CLASS_CODE_UDP_IPV6
;
6468 case AH_ESP_V6_FLOW
:
6469 *class = CLASS_CODE_AH_ESP_IPV6
;
6472 *class = CLASS_CODE_SCTP_IPV6
;
6481 static u64
niu_flowkey_to_ethflow(u64 flow_key
)
6485 if (flow_key
& FLOW_KEY_PORT
)
6486 ethflow
|= RXH_DEV_PORT
;
6487 if (flow_key
& FLOW_KEY_L2DA
)
6488 ethflow
|= RXH_L2DA
;
6489 if (flow_key
& FLOW_KEY_VLAN
)
6490 ethflow
|= RXH_VLAN
;
6491 if (flow_key
& FLOW_KEY_IPSA
)
6492 ethflow
|= RXH_IP_SRC
;
6493 if (flow_key
& FLOW_KEY_IPDA
)
6494 ethflow
|= RXH_IP_DST
;
6495 if (flow_key
& FLOW_KEY_PROTO
)
6496 ethflow
|= RXH_L3_PROTO
;
6497 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
))
6498 ethflow
|= RXH_L4_B_0_1
;
6499 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
))
6500 ethflow
|= RXH_L4_B_2_3
;
6506 static int niu_ethflow_to_flowkey(u64 ethflow
, u64
*flow_key
)
6510 if (ethflow
& RXH_DEV_PORT
)
6511 key
|= FLOW_KEY_PORT
;
6512 if (ethflow
& RXH_L2DA
)
6513 key
|= FLOW_KEY_L2DA
;
6514 if (ethflow
& RXH_VLAN
)
6515 key
|= FLOW_KEY_VLAN
;
6516 if (ethflow
& RXH_IP_SRC
)
6517 key
|= FLOW_KEY_IPSA
;
6518 if (ethflow
& RXH_IP_DST
)
6519 key
|= FLOW_KEY_IPDA
;
6520 if (ethflow
& RXH_L3_PROTO
)
6521 key
|= FLOW_KEY_PROTO
;
6522 if (ethflow
& RXH_L4_B_0_1
)
6523 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
);
6524 if (ethflow
& RXH_L4_B_2_3
)
6525 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
);
6533 static int niu_get_hash_opts(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
6535 struct niu
*np
= netdev_priv(dev
);
6540 if (!niu_ethflow_to_class(cmd
->flow_type
, &class))
6543 if (np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] &
6545 cmd
->data
= RXH_DISCARD
;
6548 cmd
->data
= niu_flowkey_to_ethflow(np
->parent
->flow_key
[class -
6549 CLASS_CODE_USER_PROG1
]);
6553 static int niu_set_hash_opts(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
6555 struct niu
*np
= netdev_priv(dev
);
6558 unsigned long flags
;
6560 if (!niu_ethflow_to_class(cmd
->flow_type
, &class))
6563 if (class < CLASS_CODE_USER_PROG1
||
6564 class > CLASS_CODE_SCTP_IPV6
)
6567 if (cmd
->data
& RXH_DISCARD
) {
6568 niu_lock_parent(np
, flags
);
6569 flow_key
= np
->parent
->tcam_key
[class -
6570 CLASS_CODE_USER_PROG1
];
6571 flow_key
|= TCAM_KEY_DISC
;
6572 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
6573 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
6574 niu_unlock_parent(np
, flags
);
6577 /* Discard was set before, but is not set now */
6578 if (np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] &
6580 niu_lock_parent(np
, flags
);
6581 flow_key
= np
->parent
->tcam_key
[class -
6582 CLASS_CODE_USER_PROG1
];
6583 flow_key
&= ~TCAM_KEY_DISC
;
6584 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
),
6586 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] =
6588 niu_unlock_parent(np
, flags
);
6592 if (!niu_ethflow_to_flowkey(cmd
->data
, &flow_key
))
6595 niu_lock_parent(np
, flags
);
6596 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
6597 np
->parent
->flow_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
6598 niu_unlock_parent(np
, flags
);
6603 static const struct {
6604 const char string
[ETH_GSTRING_LEN
];
6605 } niu_xmac_stat_keys
[] = {
6608 { "tx_fifo_errors" },
6609 { "tx_overflow_errors" },
6610 { "tx_max_pkt_size_errors" },
6611 { "tx_underflow_errors" },
6612 { "rx_local_faults" },
6613 { "rx_remote_faults" },
6614 { "rx_link_faults" },
6615 { "rx_align_errors" },
6627 { "rx_code_violations" },
6628 { "rx_len_errors" },
6629 { "rx_crc_errors" },
6630 { "rx_underflows" },
6632 { "pause_off_state" },
6633 { "pause_on_state" },
6634 { "pause_received" },
6637 #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys)
6639 static const struct {
6640 const char string
[ETH_GSTRING_LEN
];
6641 } niu_bmac_stat_keys
[] = {
6642 { "tx_underflow_errors" },
6643 { "tx_max_pkt_size_errors" },
6648 { "rx_align_errors" },
6649 { "rx_crc_errors" },
6650 { "rx_len_errors" },
6651 { "pause_off_state" },
6652 { "pause_on_state" },
6653 { "pause_received" },
6656 #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys)
6658 static const struct {
6659 const char string
[ETH_GSTRING_LEN
];
6660 } niu_rxchan_stat_keys
[] = {
6668 #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys)
6670 static const struct {
6671 const char string
[ETH_GSTRING_LEN
];
6672 } niu_txchan_stat_keys
[] = {
6679 #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys)
6681 static void niu_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
6683 struct niu
*np
= netdev_priv(dev
);
6686 if (stringset
!= ETH_SS_STATS
)
6689 if (np
->flags
& NIU_FLAGS_XMAC
) {
6690 memcpy(data
, niu_xmac_stat_keys
,
6691 sizeof(niu_xmac_stat_keys
));
6692 data
+= sizeof(niu_xmac_stat_keys
);
6694 memcpy(data
, niu_bmac_stat_keys
,
6695 sizeof(niu_bmac_stat_keys
));
6696 data
+= sizeof(niu_bmac_stat_keys
);
6698 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
6699 memcpy(data
, niu_rxchan_stat_keys
,
6700 sizeof(niu_rxchan_stat_keys
));
6701 data
+= sizeof(niu_rxchan_stat_keys
);
6703 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
6704 memcpy(data
, niu_txchan_stat_keys
,
6705 sizeof(niu_txchan_stat_keys
));
6706 data
+= sizeof(niu_txchan_stat_keys
);
6710 static int niu_get_stats_count(struct net_device
*dev
)
6712 struct niu
*np
= netdev_priv(dev
);
6714 return ((np
->flags
& NIU_FLAGS_XMAC
?
6715 NUM_XMAC_STAT_KEYS
:
6716 NUM_BMAC_STAT_KEYS
) +
6717 (np
->num_rx_rings
* NUM_RXCHAN_STAT_KEYS
) +
6718 (np
->num_tx_rings
* NUM_TXCHAN_STAT_KEYS
));
6721 static void niu_get_ethtool_stats(struct net_device
*dev
,
6722 struct ethtool_stats
*stats
, u64
*data
)
6724 struct niu
*np
= netdev_priv(dev
);
6727 niu_sync_mac_stats(np
);
6728 if (np
->flags
& NIU_FLAGS_XMAC
) {
6729 memcpy(data
, &np
->mac_stats
.xmac
,
6730 sizeof(struct niu_xmac_stats
));
6731 data
+= (sizeof(struct niu_xmac_stats
) / sizeof(u64
));
6733 memcpy(data
, &np
->mac_stats
.bmac
,
6734 sizeof(struct niu_bmac_stats
));
6735 data
+= (sizeof(struct niu_bmac_stats
) / sizeof(u64
));
6737 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
6738 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
6740 data
[0] = rp
->rx_channel
;
6741 data
[1] = rp
->rx_packets
;
6742 data
[2] = rp
->rx_bytes
;
6743 data
[3] = rp
->rx_dropped
;
6744 data
[4] = rp
->rx_errors
;
6747 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
6748 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
6750 data
[0] = rp
->tx_channel
;
6751 data
[1] = rp
->tx_packets
;
6752 data
[2] = rp
->tx_bytes
;
6753 data
[3] = rp
->tx_errors
;
6758 static u64
niu_led_state_save(struct niu
*np
)
6760 if (np
->flags
& NIU_FLAGS_XMAC
)
6761 return nr64_mac(XMAC_CONFIG
);
6763 return nr64_mac(BMAC_XIF_CONFIG
);
6766 static void niu_led_state_restore(struct niu
*np
, u64 val
)
6768 if (np
->flags
& NIU_FLAGS_XMAC
)
6769 nw64_mac(XMAC_CONFIG
, val
);
6771 nw64_mac(BMAC_XIF_CONFIG
, val
);
6774 static void niu_force_led(struct niu
*np
, int on
)
6778 if (np
->flags
& NIU_FLAGS_XMAC
) {
6780 bit
= XMAC_CONFIG_FORCE_LED_ON
;
6782 reg
= BMAC_XIF_CONFIG
;
6783 bit
= BMAC_XIF_CONFIG_LINK_LED
;
6786 val
= nr64_mac(reg
);
6794 static int niu_phys_id(struct net_device
*dev
, u32 data
)
6796 struct niu
*np
= netdev_priv(dev
);
6800 if (!netif_running(dev
))
6806 orig_led_state
= niu_led_state_save(np
);
6807 for (i
= 0; i
< (data
* 2); i
++) {
6808 int on
= ((i
% 2) == 0);
6810 niu_force_led(np
, on
);
6812 if (msleep_interruptible(500))
6815 niu_led_state_restore(np
, orig_led_state
);
6820 static const struct ethtool_ops niu_ethtool_ops
= {
6821 .get_drvinfo
= niu_get_drvinfo
,
6822 .get_link
= ethtool_op_get_link
,
6823 .get_msglevel
= niu_get_msglevel
,
6824 .set_msglevel
= niu_set_msglevel
,
6825 .get_eeprom_len
= niu_get_eeprom_len
,
6826 .get_eeprom
= niu_get_eeprom
,
6827 .get_settings
= niu_get_settings
,
6828 .set_settings
= niu_set_settings
,
6829 .get_strings
= niu_get_strings
,
6830 .get_stats_count
= niu_get_stats_count
,
6831 .get_ethtool_stats
= niu_get_ethtool_stats
,
6832 .phys_id
= niu_phys_id
,
6833 .get_rxhash
= niu_get_hash_opts
,
6834 .set_rxhash
= niu_set_hash_opts
,
6837 static int niu_ldg_assign_ldn(struct niu
*np
, struct niu_parent
*parent
,
6840 if (ldg
< NIU_LDG_MIN
|| ldg
> NIU_LDG_MAX
)
6842 if (ldn
< 0 || ldn
> LDN_MAX
)
6845 parent
->ldg_map
[ldn
] = ldg
;
6847 if (np
->parent
->plat_type
== PLAT_TYPE_NIU
) {
6848 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
6849 * the firmware, and we're not supposed to change them.
6850 * Validate the mapping, because if it's wrong we probably
6851 * won't get any interrupts and that's painful to debug.
6853 if (nr64(LDG_NUM(ldn
)) != ldg
) {
6854 dev_err(np
->device
, PFX
"Port %u, mis-matched "
6856 "for ldn %d, should be %d is %llu\n",
6858 (unsigned long long) nr64(LDG_NUM(ldn
)));
6862 nw64(LDG_NUM(ldn
), ldg
);
6867 static int niu_set_ldg_timer_res(struct niu
*np
, int res
)
6869 if (res
< 0 || res
> LDG_TIMER_RES_VAL
)
6873 nw64(LDG_TIMER_RES
, res
);
6878 static int niu_set_ldg_sid(struct niu
*np
, int ldg
, int func
, int vector
)
6880 if ((ldg
< NIU_LDG_MIN
|| ldg
> NIU_LDG_MAX
) ||
6881 (func
< 0 || func
> 3) ||
6882 (vector
< 0 || vector
> 0x1f))
6885 nw64(SID(ldg
), (func
<< SID_FUNC_SHIFT
) | vector
);
6890 static int __devinit
niu_pci_eeprom_read(struct niu
*np
, u32 addr
)
6892 u64 frame
, frame_base
= (ESPC_PIO_STAT_READ_START
|
6893 (addr
<< ESPC_PIO_STAT_ADDR_SHIFT
));
6896 if (addr
> (ESPC_PIO_STAT_ADDR
>> ESPC_PIO_STAT_ADDR_SHIFT
))
6900 nw64(ESPC_PIO_STAT
, frame
);
6904 frame
= nr64(ESPC_PIO_STAT
);
6905 if (frame
& ESPC_PIO_STAT_READ_END
)
6908 if (!(frame
& ESPC_PIO_STAT_READ_END
)) {
6909 dev_err(np
->device
, PFX
"EEPROM read timeout frame[%llx]\n",
6910 (unsigned long long) frame
);
6915 nw64(ESPC_PIO_STAT
, frame
);
6919 frame
= nr64(ESPC_PIO_STAT
);
6920 if (frame
& ESPC_PIO_STAT_READ_END
)
6923 if (!(frame
& ESPC_PIO_STAT_READ_END
)) {
6924 dev_err(np
->device
, PFX
"EEPROM read timeout frame[%llx]\n",
6925 (unsigned long long) frame
);
6929 frame
= nr64(ESPC_PIO_STAT
);
6930 return (frame
& ESPC_PIO_STAT_DATA
) >> ESPC_PIO_STAT_DATA_SHIFT
;
6933 static int __devinit
niu_pci_eeprom_read16(struct niu
*np
, u32 off
)
6935 int err
= niu_pci_eeprom_read(np
, off
);
6941 err
= niu_pci_eeprom_read(np
, off
+ 1);
6944 val
|= (err
& 0xff);
6949 static int __devinit
niu_pci_eeprom_read16_swp(struct niu
*np
, u32 off
)
6951 int err
= niu_pci_eeprom_read(np
, off
);
6958 err
= niu_pci_eeprom_read(np
, off
+ 1);
6962 val
|= (err
& 0xff) << 8;
6967 static int __devinit
niu_pci_vpd_get_propname(struct niu
*np
,
6974 for (i
= 0; i
< namebuf_len
; i
++) {
6975 int err
= niu_pci_eeprom_read(np
, off
+ i
);
6982 if (i
>= namebuf_len
)
6988 static void __devinit
niu_vpd_parse_version(struct niu
*np
)
6990 struct niu_vpd
*vpd
= &np
->vpd
;
6991 int len
= strlen(vpd
->version
) + 1;
6992 const char *s
= vpd
->version
;
6995 for (i
= 0; i
< len
- 5; i
++) {
6996 if (!strncmp(s
+ i
, "FCode ", 5))
7003 sscanf(s
, "%d.%d", &vpd
->fcode_major
, &vpd
->fcode_minor
);
7005 niudbg(PROBE
, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
7006 vpd
->fcode_major
, vpd
->fcode_minor
);
7007 if (vpd
->fcode_major
> NIU_VPD_MIN_MAJOR
||
7008 (vpd
->fcode_major
== NIU_VPD_MIN_MAJOR
&&
7009 vpd
->fcode_minor
>= NIU_VPD_MIN_MINOR
))
7010 np
->flags
|= NIU_FLAGS_VPD_VALID
;
7013 /* ESPC_PIO_EN_ENABLE must be set */
7014 static int __devinit
niu_pci_vpd_scan_props(struct niu
*np
,
7017 unsigned int found_mask
= 0;
7018 #define FOUND_MASK_MODEL 0x00000001
7019 #define FOUND_MASK_BMODEL 0x00000002
7020 #define FOUND_MASK_VERS 0x00000004
7021 #define FOUND_MASK_MAC 0x00000008
7022 #define FOUND_MASK_NMAC 0x00000010
7023 #define FOUND_MASK_PHY 0x00000020
7024 #define FOUND_MASK_ALL 0x0000003f
7026 niudbg(PROBE
, "VPD_SCAN: start[%x] end[%x]\n",
7028 while (start
< end
) {
7029 int len
, err
, instance
, type
, prop_len
;
7034 if (found_mask
== FOUND_MASK_ALL
) {
7035 niu_vpd_parse_version(np
);
7039 err
= niu_pci_eeprom_read(np
, start
+ 2);
7045 instance
= niu_pci_eeprom_read(np
, start
);
7046 type
= niu_pci_eeprom_read(np
, start
+ 3);
7047 prop_len
= niu_pci_eeprom_read(np
, start
+ 4);
7048 err
= niu_pci_vpd_get_propname(np
, start
+ 5, namebuf
, 64);
7054 if (!strcmp(namebuf
, "model")) {
7055 prop_buf
= np
->vpd
.model
;
7056 max_len
= NIU_VPD_MODEL_MAX
;
7057 found_mask
|= FOUND_MASK_MODEL
;
7058 } else if (!strcmp(namebuf
, "board-model")) {
7059 prop_buf
= np
->vpd
.board_model
;
7060 max_len
= NIU_VPD_BD_MODEL_MAX
;
7061 found_mask
|= FOUND_MASK_BMODEL
;
7062 } else if (!strcmp(namebuf
, "version")) {
7063 prop_buf
= np
->vpd
.version
;
7064 max_len
= NIU_VPD_VERSION_MAX
;
7065 found_mask
|= FOUND_MASK_VERS
;
7066 } else if (!strcmp(namebuf
, "local-mac-address")) {
7067 prop_buf
= np
->vpd
.local_mac
;
7069 found_mask
|= FOUND_MASK_MAC
;
7070 } else if (!strcmp(namebuf
, "num-mac-addresses")) {
7071 prop_buf
= &np
->vpd
.mac_num
;
7073 found_mask
|= FOUND_MASK_NMAC
;
7074 } else if (!strcmp(namebuf
, "phy-type")) {
7075 prop_buf
= np
->vpd
.phy_type
;
7076 max_len
= NIU_VPD_PHY_TYPE_MAX
;
7077 found_mask
|= FOUND_MASK_PHY
;
7080 if (max_len
&& prop_len
> max_len
) {
7081 dev_err(np
->device
, PFX
"Property '%s' length (%d) is "
7082 "too long.\n", namebuf
, prop_len
);
7087 u32 off
= start
+ 5 + err
;
7090 niudbg(PROBE
, "VPD_SCAN: Reading in property [%s] "
7091 "len[%d]\n", namebuf
, prop_len
);
7092 for (i
= 0; i
< prop_len
; i
++)
7093 *prop_buf
++ = niu_pci_eeprom_read(np
, off
+ i
);
7102 /* ESPC_PIO_EN_ENABLE must be set */
7103 static void __devinit
niu_pci_vpd_fetch(struct niu
*np
, u32 start
)
7108 err
= niu_pci_eeprom_read16_swp(np
, start
+ 1);
7114 while (start
+ offset
< ESPC_EEPROM_SIZE
) {
7115 u32 here
= start
+ offset
;
7118 err
= niu_pci_eeprom_read(np
, here
);
7122 err
= niu_pci_eeprom_read16_swp(np
, here
+ 1);
7126 here
= start
+ offset
+ 3;
7127 end
= start
+ offset
+ err
;
7131 err
= niu_pci_vpd_scan_props(np
, here
, end
);
7132 if (err
< 0 || err
== 1)
7137 /* ESPC_PIO_EN_ENABLE must be set */
7138 static u32 __devinit
niu_pci_vpd_offset(struct niu
*np
)
7140 u32 start
= 0, end
= ESPC_EEPROM_SIZE
, ret
;
7143 while (start
< end
) {
7146 /* ROM header signature? */
7147 err
= niu_pci_eeprom_read16(np
, start
+ 0);
7151 /* Apply offset to PCI data structure. */
7152 err
= niu_pci_eeprom_read16(np
, start
+ 23);
7157 /* Check for "PCIR" signature. */
7158 err
= niu_pci_eeprom_read16(np
, start
+ 0);
7161 err
= niu_pci_eeprom_read16(np
, start
+ 2);
7165 /* Check for OBP image type. */
7166 err
= niu_pci_eeprom_read(np
, start
+ 20);
7170 err
= niu_pci_eeprom_read(np
, ret
+ 2);
7174 start
= ret
+ (err
* 512);
7178 err
= niu_pci_eeprom_read16_swp(np
, start
+ 8);
7183 err
= niu_pci_eeprom_read(np
, ret
+ 0);
7193 static int __devinit
niu_phy_type_prop_decode(struct niu
*np
,
7194 const char *phy_prop
)
7196 if (!strcmp(phy_prop
, "mif")) {
7197 /* 1G copper, MII */
7198 np
->flags
&= ~(NIU_FLAGS_FIBER
|
7200 np
->mac_xcvr
= MAC_XCVR_MII
;
7201 } else if (!strcmp(phy_prop
, "xgf")) {
7202 /* 10G fiber, XPCS */
7203 np
->flags
|= (NIU_FLAGS_10G
|
7205 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7206 } else if (!strcmp(phy_prop
, "pcs")) {
7208 np
->flags
&= ~NIU_FLAGS_10G
;
7209 np
->flags
|= NIU_FLAGS_FIBER
;
7210 np
->mac_xcvr
= MAC_XCVR_PCS
;
7211 } else if (!strcmp(phy_prop
, "xgc")) {
7212 /* 10G copper, XPCS */
7213 np
->flags
|= NIU_FLAGS_10G
;
7214 np
->flags
&= ~NIU_FLAGS_FIBER
;
7215 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7222 static int niu_pci_vpd_get_nports(struct niu
*np
)
7226 if ((!strcmp(np
->vpd
.model
, NIU_QGC_LP_MDL_STR
)) ||
7227 (!strcmp(np
->vpd
.model
, NIU_QGC_PEM_MDL_STR
)) ||
7228 (!strcmp(np
->vpd
.model
, NIU_MARAMBA_MDL_STR
)) ||
7229 (!strcmp(np
->vpd
.model
, NIU_KIMI_MDL_STR
)) ||
7230 (!strcmp(np
->vpd
.model
, NIU_ALONSO_MDL_STR
))) {
7232 } else if ((!strcmp(np
->vpd
.model
, NIU_2XGF_LP_MDL_STR
)) ||
7233 (!strcmp(np
->vpd
.model
, NIU_2XGF_PEM_MDL_STR
)) ||
7234 (!strcmp(np
->vpd
.model
, NIU_FOXXY_MDL_STR
)) ||
7235 (!strcmp(np
->vpd
.model
, NIU_2XGF_MRVL_MDL_STR
))) {
7242 static void __devinit
niu_pci_vpd_validate(struct niu
*np
)
7244 struct net_device
*dev
= np
->dev
;
7245 struct niu_vpd
*vpd
= &np
->vpd
;
7248 if (!is_valid_ether_addr(&vpd
->local_mac
[0])) {
7249 dev_err(np
->device
, PFX
"VPD MAC invalid, "
7250 "falling back to SPROM.\n");
7252 np
->flags
&= ~NIU_FLAGS_VPD_VALID
;
7256 if (!strcmp(np
->vpd
.model
, NIU_ALONSO_MDL_STR
) ||
7257 !strcmp(np
->vpd
.model
, NIU_KIMI_MDL_STR
)) {
7258 np
->flags
|= NIU_FLAGS_10G
;
7259 np
->flags
&= ~NIU_FLAGS_FIBER
;
7260 np
->flags
|= NIU_FLAGS_XCVR_SERDES
;
7261 np
->mac_xcvr
= MAC_XCVR_PCS
;
7263 np
->flags
|= NIU_FLAGS_FIBER
;
7264 np
->flags
&= ~NIU_FLAGS_10G
;
7266 if (np
->flags
& NIU_FLAGS_10G
)
7267 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7268 } else if (!strcmp(np
->vpd
.model
, NIU_FOXXY_MDL_STR
)) {
7269 np
->flags
|= (NIU_FLAGS_10G
| NIU_FLAGS_FIBER
|
7270 NIU_FLAGS_HOTPLUG_PHY
);
7271 } else if (niu_phy_type_prop_decode(np
, np
->vpd
.phy_type
)) {
7272 dev_err(np
->device
, PFX
"Illegal phy string [%s].\n",
7274 dev_err(np
->device
, PFX
"Falling back to SPROM.\n");
7275 np
->flags
&= ~NIU_FLAGS_VPD_VALID
;
7279 memcpy(dev
->perm_addr
, vpd
->local_mac
, ETH_ALEN
);
7281 val8
= dev
->perm_addr
[5];
7282 dev
->perm_addr
[5] += np
->port
;
7283 if (dev
->perm_addr
[5] < val8
)
7284 dev
->perm_addr
[4]++;
7286 memcpy(dev
->dev_addr
, dev
->perm_addr
, dev
->addr_len
);
7289 static int __devinit
niu_pci_probe_sprom(struct niu
*np
)
7291 struct net_device
*dev
= np
->dev
;
7296 val
= (nr64(ESPC_VER_IMGSZ
) & ESPC_VER_IMGSZ_IMGSZ
);
7297 val
>>= ESPC_VER_IMGSZ_IMGSZ_SHIFT
;
7300 np
->eeprom_len
= len
;
7302 niudbg(PROBE
, "SPROM: Image size %llu\n", (unsigned long long) val
);
7305 for (i
= 0; i
< len
; i
++) {
7306 val
= nr64(ESPC_NCR(i
));
7307 sum
+= (val
>> 0) & 0xff;
7308 sum
+= (val
>> 8) & 0xff;
7309 sum
+= (val
>> 16) & 0xff;
7310 sum
+= (val
>> 24) & 0xff;
7312 niudbg(PROBE
, "SPROM: Checksum %x\n", (int)(sum
& 0xff));
7313 if ((sum
& 0xff) != 0xab) {
7314 dev_err(np
->device
, PFX
"Bad SPROM checksum "
7315 "(%x, should be 0xab)\n", (int) (sum
& 0xff));
7319 val
= nr64(ESPC_PHY_TYPE
);
7322 val8
= (val
& ESPC_PHY_TYPE_PORT0
) >>
7323 ESPC_PHY_TYPE_PORT0_SHIFT
;
7326 val8
= (val
& ESPC_PHY_TYPE_PORT1
) >>
7327 ESPC_PHY_TYPE_PORT1_SHIFT
;
7330 val8
= (val
& ESPC_PHY_TYPE_PORT2
) >>
7331 ESPC_PHY_TYPE_PORT2_SHIFT
;
7334 val8
= (val
& ESPC_PHY_TYPE_PORT3
) >>
7335 ESPC_PHY_TYPE_PORT3_SHIFT
;
7338 dev_err(np
->device
, PFX
"Bogus port number %u\n",
7342 niudbg(PROBE
, "SPROM: PHY type %x\n", val8
);
7345 case ESPC_PHY_TYPE_1G_COPPER
:
7346 /* 1G copper, MII */
7347 np
->flags
&= ~(NIU_FLAGS_FIBER
|
7349 np
->mac_xcvr
= MAC_XCVR_MII
;
7352 case ESPC_PHY_TYPE_1G_FIBER
:
7354 np
->flags
&= ~NIU_FLAGS_10G
;
7355 np
->flags
|= NIU_FLAGS_FIBER
;
7356 np
->mac_xcvr
= MAC_XCVR_PCS
;
7359 case ESPC_PHY_TYPE_10G_COPPER
:
7360 /* 10G copper, XPCS */
7361 np
->flags
|= NIU_FLAGS_10G
;
7362 np
->flags
&= ~NIU_FLAGS_FIBER
;
7363 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7366 case ESPC_PHY_TYPE_10G_FIBER
:
7367 /* 10G fiber, XPCS */
7368 np
->flags
|= (NIU_FLAGS_10G
|
7370 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7374 dev_err(np
->device
, PFX
"Bogus SPROM phy type %u\n", val8
);
7378 val
= nr64(ESPC_MAC_ADDR0
);
7379 niudbg(PROBE
, "SPROM: MAC_ADDR0[%08llx]\n",
7380 (unsigned long long) val
);
7381 dev
->perm_addr
[0] = (val
>> 0) & 0xff;
7382 dev
->perm_addr
[1] = (val
>> 8) & 0xff;
7383 dev
->perm_addr
[2] = (val
>> 16) & 0xff;
7384 dev
->perm_addr
[3] = (val
>> 24) & 0xff;
7386 val
= nr64(ESPC_MAC_ADDR1
);
7387 niudbg(PROBE
, "SPROM: MAC_ADDR1[%08llx]\n",
7388 (unsigned long long) val
);
7389 dev
->perm_addr
[4] = (val
>> 0) & 0xff;
7390 dev
->perm_addr
[5] = (val
>> 8) & 0xff;
7392 if (!is_valid_ether_addr(&dev
->perm_addr
[0])) {
7393 dev_err(np
->device
, PFX
"SPROM MAC address invalid\n");
7394 dev_err(np
->device
, PFX
"[ \n");
7395 for (i
= 0; i
< 6; i
++)
7396 printk("%02x ", dev
->perm_addr
[i
]);
7401 val8
= dev
->perm_addr
[5];
7402 dev
->perm_addr
[5] += np
->port
;
7403 if (dev
->perm_addr
[5] < val8
)
7404 dev
->perm_addr
[4]++;
7406 memcpy(dev
->dev_addr
, dev
->perm_addr
, dev
->addr_len
);
7408 val
= nr64(ESPC_MOD_STR_LEN
);
7409 niudbg(PROBE
, "SPROM: MOD_STR_LEN[%llu]\n",
7410 (unsigned long long) val
);
7414 for (i
= 0; i
< val
; i
+= 4) {
7415 u64 tmp
= nr64(ESPC_NCR(5 + (i
/ 4)));
7417 np
->vpd
.model
[i
+ 3] = (tmp
>> 0) & 0xff;
7418 np
->vpd
.model
[i
+ 2] = (tmp
>> 8) & 0xff;
7419 np
->vpd
.model
[i
+ 1] = (tmp
>> 16) & 0xff;
7420 np
->vpd
.model
[i
+ 0] = (tmp
>> 24) & 0xff;
7422 np
->vpd
.model
[val
] = '\0';
7424 val
= nr64(ESPC_BD_MOD_STR_LEN
);
7425 niudbg(PROBE
, "SPROM: BD_MOD_STR_LEN[%llu]\n",
7426 (unsigned long long) val
);
7430 for (i
= 0; i
< val
; i
+= 4) {
7431 u64 tmp
= nr64(ESPC_NCR(14 + (i
/ 4)));
7433 np
->vpd
.board_model
[i
+ 3] = (tmp
>> 0) & 0xff;
7434 np
->vpd
.board_model
[i
+ 2] = (tmp
>> 8) & 0xff;
7435 np
->vpd
.board_model
[i
+ 1] = (tmp
>> 16) & 0xff;
7436 np
->vpd
.board_model
[i
+ 0] = (tmp
>> 24) & 0xff;
7438 np
->vpd
.board_model
[val
] = '\0';
7441 nr64(ESPC_NUM_PORTS_MACS
) & ESPC_NUM_PORTS_MACS_VAL
;
7442 niudbg(PROBE
, "SPROM: NUM_PORTS_MACS[%d]\n",
7448 static int __devinit
niu_get_and_validate_port(struct niu
*np
)
7450 struct niu_parent
*parent
= np
->parent
;
7453 np
->flags
|= NIU_FLAGS_XMAC
;
7455 if (!parent
->num_ports
) {
7456 if (parent
->plat_type
== PLAT_TYPE_NIU
) {
7457 parent
->num_ports
= 2;
7459 parent
->num_ports
= niu_pci_vpd_get_nports(np
);
7460 if (!parent
->num_ports
) {
7461 /* Fall back to SPROM as last resort.
7462 * This will fail on most cards.
7464 parent
->num_ports
= nr64(ESPC_NUM_PORTS_MACS
) &
7465 ESPC_NUM_PORTS_MACS_VAL
;
7467 /* All of the current probing methods fail on
7468 * Maramba on-board parts.
7470 if (!parent
->num_ports
)
7471 parent
->num_ports
= 4;
7476 niudbg(PROBE
, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
7477 np
->port
, parent
->num_ports
);
7478 if (np
->port
>= parent
->num_ports
)
7484 static int __devinit
phy_record(struct niu_parent
*parent
,
7485 struct phy_probe_info
*p
,
7486 int dev_id_1
, int dev_id_2
, u8 phy_port
,
7489 u32 id
= (dev_id_1
<< 16) | dev_id_2
;
7492 if (dev_id_1
< 0 || dev_id_2
< 0)
7494 if (type
== PHY_TYPE_PMA_PMD
|| type
== PHY_TYPE_PCS
) {
7495 if (((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_BCM8704
) &&
7496 ((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_MRVL88X2011
) &&
7497 ((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_BCM8706
))
7500 if ((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_BCM5464R
)
7504 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
7506 (type
== PHY_TYPE_PMA_PMD
?
7508 (type
== PHY_TYPE_PCS
?
7512 if (p
->cur
[type
] >= NIU_MAX_PORTS
) {
7513 printk(KERN_ERR PFX
"Too many PHY ports.\n");
7517 p
->phy_id
[type
][idx
] = id
;
7518 p
->phy_port
[type
][idx
] = phy_port
;
7519 p
->cur
[type
] = idx
+ 1;
7523 static int __devinit
port_has_10g(struct phy_probe_info
*p
, int port
)
7527 for (i
= 0; i
< p
->cur
[PHY_TYPE_PMA_PMD
]; i
++) {
7528 if (p
->phy_port
[PHY_TYPE_PMA_PMD
][i
] == port
)
7531 for (i
= 0; i
< p
->cur
[PHY_TYPE_PCS
]; i
++) {
7532 if (p
->phy_port
[PHY_TYPE_PCS
][i
] == port
)
7539 static int __devinit
count_10g_ports(struct phy_probe_info
*p
, int *lowest
)
7545 for (port
= 8; port
< 32; port
++) {
7546 if (port_has_10g(p
, port
)) {
7556 static int __devinit
count_1g_ports(struct phy_probe_info
*p
, int *lowest
)
7559 if (p
->cur
[PHY_TYPE_MII
])
7560 *lowest
= p
->phy_port
[PHY_TYPE_MII
][0];
7562 return p
->cur
[PHY_TYPE_MII
];
7565 static void __devinit
niu_n2_divide_channels(struct niu_parent
*parent
)
7567 int num_ports
= parent
->num_ports
;
7570 for (i
= 0; i
< num_ports
; i
++) {
7571 parent
->rxchan_per_port
[i
] = (16 / num_ports
);
7572 parent
->txchan_per_port
[i
] = (16 / num_ports
);
7574 pr_info(PFX
"niu%d: Port %u [%u RX chans] "
7577 parent
->rxchan_per_port
[i
],
7578 parent
->txchan_per_port
[i
]);
7582 static void __devinit
niu_divide_channels(struct niu_parent
*parent
,
7583 int num_10g
, int num_1g
)
7585 int num_ports
= parent
->num_ports
;
7586 int rx_chans_per_10g
, rx_chans_per_1g
;
7587 int tx_chans_per_10g
, tx_chans_per_1g
;
7588 int i
, tot_rx
, tot_tx
;
7590 if (!num_10g
|| !num_1g
) {
7591 rx_chans_per_10g
= rx_chans_per_1g
=
7592 (NIU_NUM_RXCHAN
/ num_ports
);
7593 tx_chans_per_10g
= tx_chans_per_1g
=
7594 (NIU_NUM_TXCHAN
/ num_ports
);
7596 rx_chans_per_1g
= NIU_NUM_RXCHAN
/ 8;
7597 rx_chans_per_10g
= (NIU_NUM_RXCHAN
-
7598 (rx_chans_per_1g
* num_1g
)) /
7601 tx_chans_per_1g
= NIU_NUM_TXCHAN
/ 6;
7602 tx_chans_per_10g
= (NIU_NUM_TXCHAN
-
7603 (tx_chans_per_1g
* num_1g
)) /
7607 tot_rx
= tot_tx
= 0;
7608 for (i
= 0; i
< num_ports
; i
++) {
7609 int type
= phy_decode(parent
->port_phy
, i
);
7611 if (type
== PORT_TYPE_10G
) {
7612 parent
->rxchan_per_port
[i
] = rx_chans_per_10g
;
7613 parent
->txchan_per_port
[i
] = tx_chans_per_10g
;
7615 parent
->rxchan_per_port
[i
] = rx_chans_per_1g
;
7616 parent
->txchan_per_port
[i
] = tx_chans_per_1g
;
7618 pr_info(PFX
"niu%d: Port %u [%u RX chans] "
7621 parent
->rxchan_per_port
[i
],
7622 parent
->txchan_per_port
[i
]);
7623 tot_rx
+= parent
->rxchan_per_port
[i
];
7624 tot_tx
+= parent
->txchan_per_port
[i
];
7627 if (tot_rx
> NIU_NUM_RXCHAN
) {
7628 printk(KERN_ERR PFX
"niu%d: Too many RX channels (%d), "
7629 "resetting to one per port.\n",
7630 parent
->index
, tot_rx
);
7631 for (i
= 0; i
< num_ports
; i
++)
7632 parent
->rxchan_per_port
[i
] = 1;
7634 if (tot_tx
> NIU_NUM_TXCHAN
) {
7635 printk(KERN_ERR PFX
"niu%d: Too many TX channels (%d), "
7636 "resetting to one per port.\n",
7637 parent
->index
, tot_tx
);
7638 for (i
= 0; i
< num_ports
; i
++)
7639 parent
->txchan_per_port
[i
] = 1;
7641 if (tot_rx
< NIU_NUM_RXCHAN
|| tot_tx
< NIU_NUM_TXCHAN
) {
7642 printk(KERN_WARNING PFX
"niu%d: Driver bug, wasted channels, "
7644 parent
->index
, tot_rx
, tot_tx
);
7648 static void __devinit
niu_divide_rdc_groups(struct niu_parent
*parent
,
7649 int num_10g
, int num_1g
)
7651 int i
, num_ports
= parent
->num_ports
;
7652 int rdc_group
, rdc_groups_per_port
;
7653 int rdc_channel_base
;
7656 rdc_groups_per_port
= NIU_NUM_RDC_TABLES
/ num_ports
;
7658 rdc_channel_base
= 0;
7660 for (i
= 0; i
< num_ports
; i
++) {
7661 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[i
];
7662 int grp
, num_channels
= parent
->rxchan_per_port
[i
];
7663 int this_channel_offset
;
7665 tp
->first_table_num
= rdc_group
;
7666 tp
->num_tables
= rdc_groups_per_port
;
7667 this_channel_offset
= 0;
7668 for (grp
= 0; grp
< tp
->num_tables
; grp
++) {
7669 struct rdc_table
*rt
= &tp
->tables
[grp
];
7672 pr_info(PFX
"niu%d: Port %d RDC tbl(%d) [ ",
7673 parent
->index
, i
, tp
->first_table_num
+ grp
);
7674 for (slot
= 0; slot
< NIU_RDC_TABLE_SLOTS
; slot
++) {
7675 rt
->rxdma_channel
[slot
] =
7676 rdc_channel_base
+ this_channel_offset
;
7678 printk("%d ", rt
->rxdma_channel
[slot
]);
7680 if (++this_channel_offset
== num_channels
)
7681 this_channel_offset
= 0;
7686 parent
->rdc_default
[i
] = rdc_channel_base
;
7688 rdc_channel_base
+= num_channels
;
7689 rdc_group
+= rdc_groups_per_port
;
7693 static int __devinit
fill_phy_probe_info(struct niu
*np
,
7694 struct niu_parent
*parent
,
7695 struct phy_probe_info
*info
)
7697 unsigned long flags
;
7700 memset(info
, 0, sizeof(*info
));
7702 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */
7703 niu_lock_parent(np
, flags
);
7705 for (port
= 8; port
< 32; port
++) {
7706 int dev_id_1
, dev_id_2
;
7708 dev_id_1
= mdio_read(np
, port
,
7709 NIU_PMA_PMD_DEV_ADDR
, MII_PHYSID1
);
7710 dev_id_2
= mdio_read(np
, port
,
7711 NIU_PMA_PMD_DEV_ADDR
, MII_PHYSID2
);
7712 err
= phy_record(parent
, info
, dev_id_1
, dev_id_2
, port
,
7716 dev_id_1
= mdio_read(np
, port
,
7717 NIU_PCS_DEV_ADDR
, MII_PHYSID1
);
7718 dev_id_2
= mdio_read(np
, port
,
7719 NIU_PCS_DEV_ADDR
, MII_PHYSID2
);
7720 err
= phy_record(parent
, info
, dev_id_1
, dev_id_2
, port
,
7724 dev_id_1
= mii_read(np
, port
, MII_PHYSID1
);
7725 dev_id_2
= mii_read(np
, port
, MII_PHYSID2
);
7726 err
= phy_record(parent
, info
, dev_id_1
, dev_id_2
, port
,
7731 niu_unlock_parent(np
, flags
);
7736 static int __devinit
walk_phys(struct niu
*np
, struct niu_parent
*parent
)
7738 struct phy_probe_info
*info
= &parent
->phy_probe_info
;
7739 int lowest_10g
, lowest_1g
;
7740 int num_10g
, num_1g
;
7744 if (!strcmp(np
->vpd
.model
, NIU_ALONSO_MDL_STR
) ||
7745 !strcmp(np
->vpd
.model
, NIU_KIMI_MDL_STR
)) {
7748 parent
->plat_type
= PLAT_TYPE_ATCA_CP3220
;
7749 parent
->num_ports
= 4;
7750 val
= (phy_encode(PORT_TYPE_1G
, 0) |
7751 phy_encode(PORT_TYPE_1G
, 1) |
7752 phy_encode(PORT_TYPE_1G
, 2) |
7753 phy_encode(PORT_TYPE_1G
, 3));
7754 } else if (!strcmp(np
->vpd
.model
, NIU_FOXXY_MDL_STR
)) {
7757 parent
->num_ports
= 2;
7758 val
= (phy_encode(PORT_TYPE_10G
, 0) |
7759 phy_encode(PORT_TYPE_10G
, 1));
7761 err
= fill_phy_probe_info(np
, parent
, info
);
7765 num_10g
= count_10g_ports(info
, &lowest_10g
);
7766 num_1g
= count_1g_ports(info
, &lowest_1g
);
7768 switch ((num_10g
<< 4) | num_1g
) {
7770 if (lowest_1g
== 10)
7771 parent
->plat_type
= PLAT_TYPE_VF_P0
;
7772 else if (lowest_1g
== 26)
7773 parent
->plat_type
= PLAT_TYPE_VF_P1
;
7775 goto unknown_vg_1g_port
;
7779 val
= (phy_encode(PORT_TYPE_10G
, 0) |
7780 phy_encode(PORT_TYPE_10G
, 1) |
7781 phy_encode(PORT_TYPE_1G
, 2) |
7782 phy_encode(PORT_TYPE_1G
, 3));
7786 val
= (phy_encode(PORT_TYPE_10G
, 0) |
7787 phy_encode(PORT_TYPE_10G
, 1));
7791 val
= phy_encode(PORT_TYPE_10G
, np
->port
);
7795 if (lowest_1g
== 10)
7796 parent
->plat_type
= PLAT_TYPE_VF_P0
;
7797 else if (lowest_1g
== 26)
7798 parent
->plat_type
= PLAT_TYPE_VF_P1
;
7800 goto unknown_vg_1g_port
;
7804 if ((lowest_10g
& 0x7) == 0)
7805 val
= (phy_encode(PORT_TYPE_10G
, 0) |
7806 phy_encode(PORT_TYPE_1G
, 1) |
7807 phy_encode(PORT_TYPE_1G
, 2) |
7808 phy_encode(PORT_TYPE_1G
, 3));
7810 val
= (phy_encode(PORT_TYPE_1G
, 0) |
7811 phy_encode(PORT_TYPE_10G
, 1) |
7812 phy_encode(PORT_TYPE_1G
, 2) |
7813 phy_encode(PORT_TYPE_1G
, 3));
7817 if (lowest_1g
== 10)
7818 parent
->plat_type
= PLAT_TYPE_VF_P0
;
7819 else if (lowest_1g
== 26)
7820 parent
->plat_type
= PLAT_TYPE_VF_P1
;
7822 goto unknown_vg_1g_port
;
7824 val
= (phy_encode(PORT_TYPE_1G
, 0) |
7825 phy_encode(PORT_TYPE_1G
, 1) |
7826 phy_encode(PORT_TYPE_1G
, 2) |
7827 phy_encode(PORT_TYPE_1G
, 3));
7831 printk(KERN_ERR PFX
"Unsupported port config "
7838 parent
->port_phy
= val
;
7840 if (parent
->plat_type
== PLAT_TYPE_NIU
)
7841 niu_n2_divide_channels(parent
);
7843 niu_divide_channels(parent
, num_10g
, num_1g
);
7845 niu_divide_rdc_groups(parent
, num_10g
, num_1g
);
7850 printk(KERN_ERR PFX
"Cannot identify platform type, 1gport=%d\n",
7855 static int __devinit
niu_probe_ports(struct niu
*np
)
7857 struct niu_parent
*parent
= np
->parent
;
7860 niudbg(PROBE
, "niu_probe_ports(): port_phy[%08x]\n",
7863 if (parent
->port_phy
== PORT_PHY_UNKNOWN
) {
7864 err
= walk_phys(np
, parent
);
7868 niu_set_ldg_timer_res(np
, 2);
7869 for (i
= 0; i
<= LDN_MAX
; i
++)
7870 niu_ldn_irq_enable(np
, i
, 0);
7873 if (parent
->port_phy
== PORT_PHY_INVALID
)
7879 static int __devinit
niu_classifier_swstate_init(struct niu
*np
)
7881 struct niu_classifier
*cp
= &np
->clas
;
7883 niudbg(PROBE
, "niu_classifier_swstate_init: num_tcam(%d)\n",
7884 np
->parent
->tcam_num_entries
);
7886 cp
->tcam_index
= (u16
) np
->port
;
7887 cp
->h1_init
= 0xffffffff;
7888 cp
->h2_init
= 0xffff;
7890 return fflp_early_init(np
);
7893 static void __devinit
niu_link_config_init(struct niu
*np
)
7895 struct niu_link_config
*lp
= &np
->link_config
;
7897 lp
->advertising
= (ADVERTISED_10baseT_Half
|
7898 ADVERTISED_10baseT_Full
|
7899 ADVERTISED_100baseT_Half
|
7900 ADVERTISED_100baseT_Full
|
7901 ADVERTISED_1000baseT_Half
|
7902 ADVERTISED_1000baseT_Full
|
7903 ADVERTISED_10000baseT_Full
|
7904 ADVERTISED_Autoneg
);
7905 lp
->speed
= lp
->active_speed
= SPEED_INVALID
;
7906 lp
->duplex
= lp
->active_duplex
= DUPLEX_INVALID
;
7908 lp
->loopback_mode
= LOOPBACK_MAC
;
7909 lp
->active_speed
= SPEED_10000
;
7910 lp
->active_duplex
= DUPLEX_FULL
;
7912 lp
->loopback_mode
= LOOPBACK_DISABLED
;
7916 static int __devinit
niu_init_mac_ipp_pcs_base(struct niu
*np
)
7920 np
->mac_regs
= np
->regs
+ XMAC_PORT0_OFF
;
7921 np
->ipp_off
= 0x00000;
7922 np
->pcs_off
= 0x04000;
7923 np
->xpcs_off
= 0x02000;
7927 np
->mac_regs
= np
->regs
+ XMAC_PORT1_OFF
;
7928 np
->ipp_off
= 0x08000;
7929 np
->pcs_off
= 0x0a000;
7930 np
->xpcs_off
= 0x08000;
7934 np
->mac_regs
= np
->regs
+ BMAC_PORT2_OFF
;
7935 np
->ipp_off
= 0x04000;
7936 np
->pcs_off
= 0x0e000;
7937 np
->xpcs_off
= ~0UL;
7941 np
->mac_regs
= np
->regs
+ BMAC_PORT3_OFF
;
7942 np
->ipp_off
= 0x0c000;
7943 np
->pcs_off
= 0x12000;
7944 np
->xpcs_off
= ~0UL;
7948 dev_err(np
->device
, PFX
"Port %u is invalid, cannot "
7949 "compute MAC block offset.\n", np
->port
);
7956 static void __devinit
niu_try_msix(struct niu
*np
, u8
*ldg_num_map
)
7958 struct msix_entry msi_vec
[NIU_NUM_LDG
];
7959 struct niu_parent
*parent
= np
->parent
;
7960 struct pci_dev
*pdev
= np
->pdev
;
7961 int i
, num_irqs
, err
;
7964 first_ldg
= (NIU_NUM_LDG
/ parent
->num_ports
) * np
->port
;
7965 for (i
= 0; i
< (NIU_NUM_LDG
/ parent
->num_ports
); i
++)
7966 ldg_num_map
[i
] = first_ldg
+ i
;
7968 num_irqs
= (parent
->rxchan_per_port
[np
->port
] +
7969 parent
->txchan_per_port
[np
->port
] +
7970 (np
->port
== 0 ? 3 : 1));
7971 BUG_ON(num_irqs
> (NIU_NUM_LDG
/ parent
->num_ports
));
7974 for (i
= 0; i
< num_irqs
; i
++) {
7975 msi_vec
[i
].vector
= 0;
7976 msi_vec
[i
].entry
= i
;
7979 err
= pci_enable_msix(pdev
, msi_vec
, num_irqs
);
7981 np
->flags
&= ~NIU_FLAGS_MSIX
;
7989 np
->flags
|= NIU_FLAGS_MSIX
;
7990 for (i
= 0; i
< num_irqs
; i
++)
7991 np
->ldg
[i
].irq
= msi_vec
[i
].vector
;
7992 np
->num_ldg
= num_irqs
;
7995 static int __devinit
niu_n2_irq_init(struct niu
*np
, u8
*ldg_num_map
)
7997 #ifdef CONFIG_SPARC64
7998 struct of_device
*op
= np
->op
;
7999 const u32
*int_prop
;
8002 int_prop
= of_get_property(op
->node
, "interrupts", NULL
);
8006 for (i
= 0; i
< op
->num_irqs
; i
++) {
8007 ldg_num_map
[i
] = int_prop
[i
];
8008 np
->ldg
[i
].irq
= op
->irqs
[i
];
8011 np
->num_ldg
= op
->num_irqs
;
8019 static int __devinit
niu_ldg_init(struct niu
*np
)
8021 struct niu_parent
*parent
= np
->parent
;
8022 u8 ldg_num_map
[NIU_NUM_LDG
];
8023 int first_chan
, num_chan
;
8024 int i
, err
, ldg_rotor
;
8028 np
->ldg
[0].irq
= np
->dev
->irq
;
8029 if (parent
->plat_type
== PLAT_TYPE_NIU
) {
8030 err
= niu_n2_irq_init(np
, ldg_num_map
);
8034 niu_try_msix(np
, ldg_num_map
);
8037 for (i
= 0; i
< np
->num_ldg
; i
++) {
8038 struct niu_ldg
*lp
= &np
->ldg
[i
];
8040 netif_napi_add(np
->dev
, &lp
->napi
, niu_poll
, 64);
8043 lp
->ldg_num
= ldg_num_map
[i
];
8044 lp
->timer
= 2; /* XXX */
8046 /* On N2 NIU the firmware has setup the SID mappings so they go
8047 * to the correct values that will route the LDG to the proper
8048 * interrupt in the NCU interrupt table.
8050 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
8051 err
= niu_set_ldg_sid(np
, lp
->ldg_num
, port
, i
);
8057 /* We adopt the LDG assignment ordering used by the N2 NIU
8058 * 'interrupt' properties because that simplifies a lot of
8059 * things. This ordering is:
8062 * MIF (if port zero)
8063 * SYSERR (if port zero)
8070 err
= niu_ldg_assign_ldn(np
, parent
, ldg_num_map
[ldg_rotor
],
8076 if (ldg_rotor
== np
->num_ldg
)
8080 err
= niu_ldg_assign_ldn(np
, parent
,
8081 ldg_num_map
[ldg_rotor
],
8087 if (ldg_rotor
== np
->num_ldg
)
8090 err
= niu_ldg_assign_ldn(np
, parent
,
8091 ldg_num_map
[ldg_rotor
],
8097 if (ldg_rotor
== np
->num_ldg
)
8103 for (i
= 0; i
< port
; i
++)
8104 first_chan
+= parent
->rxchan_per_port
[port
];
8105 num_chan
= parent
->rxchan_per_port
[port
];
8107 for (i
= first_chan
; i
< (first_chan
+ num_chan
); i
++) {
8108 err
= niu_ldg_assign_ldn(np
, parent
,
8109 ldg_num_map
[ldg_rotor
],
8114 if (ldg_rotor
== np
->num_ldg
)
8119 for (i
= 0; i
< port
; i
++)
8120 first_chan
+= parent
->txchan_per_port
[port
];
8121 num_chan
= parent
->txchan_per_port
[port
];
8122 for (i
= first_chan
; i
< (first_chan
+ num_chan
); i
++) {
8123 err
= niu_ldg_assign_ldn(np
, parent
,
8124 ldg_num_map
[ldg_rotor
],
8129 if (ldg_rotor
== np
->num_ldg
)
8136 static void __devexit
niu_ldg_free(struct niu
*np
)
8138 if (np
->flags
& NIU_FLAGS_MSIX
)
8139 pci_disable_msix(np
->pdev
);
static int __devinit niu_get_of_props(struct niu *np)
{
#ifdef CONFIG_SPARC64
	struct net_device *dev = np->dev;
	struct device_node *dp;
	const char *phy_type;
	const u8 *mac_addr;
	const char *model;
	int prop_len;
	int i;

	if (np->parent->plat_type == PLAT_TYPE_NIU)
		dp = np->op->node;
	else
		dp = pci_device_to_OF_node(np->pdev);

	phy_type = of_get_property(dp, "phy-type", &prop_len);
	if (!phy_type) {
		dev_err(np->device, PFX "%s: OF node lacks "
			"phy-type property\n",
			dp->full_name);
		return -EINVAL;
	}

	if (!strcmp(phy_type, "none"))
		return -ENODEV;

	strcpy(np->vpd.phy_type, phy_type);

	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
			dp->full_name, np->vpd.phy_type);
		return -EINVAL;
	}

	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		dev_err(np->device, PFX "%s: OF node lacks "
			"local-mac-address property\n",
			dp->full_name);
		return -EINVAL;
	}
	if (prop_len != dev->addr_len) {
		dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
			"is wrong.\n",
			dp->full_name, prop_len);
	}
	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
			dp->full_name);
		dev_err(np->device, PFX "%s: [ \n",
			dp->full_name);
		for (i = 0; i < 6; i++)
			printk("%02x ", dev->perm_addr[i]);
		printk("]\n");
		return -EINVAL;
	}

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);

	model = of_get_property(dp, "model", &prop_len);
	if (model)
		strcpy(np->vpd.model, model);

	return 0;
#else
	return -EINVAL;
#endif
}
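/* Gather the chip invariants: OF properties where available, otherwise
 * the VPD/SPROM contents, then probe the ports and link configuration.
 */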
static int __devinit niu_get_invariants(struct niu *np)
{
	int err, have_props;
	u32 offset;

	err = niu_get_of_props(np);
	if (err == -ENODEV)
		return err;
	have_props = !err;

	err = niu_init_mac_ipp_pcs_base(np);
	if (err)
		return err;

	err = niu_get_and_validate_port(np);
	if (err)
		return err;

	if (np->parent->plat_type == PLAT_TYPE_NIU)
		return 0;

	nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
	offset = niu_pci_vpd_offset(np);
	niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
	       offset);
	if (offset)
		niu_pci_vpd_fetch(np, offset);
	nw64(ESPC_PIO_EN, 0);

	if (np->flags & NIU_FLAGS_VPD_VALID) {
		niu_pci_vpd_validate(np);
		err = niu_get_and_validate_port(np);
		if (err)
			return err;
	}

	if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
		err = niu_get_and_validate_port(np);
		if (err)
			return err;
		err = niu_pci_probe_sprom(np);
		if (err)
			return err;
	}

	err = niu_probe_ports(np);
	if (err)
		return err;

	niu_classifier_swstate_init(np);
	niu_link_config_init(np);

	err = niu_determine_phy_disposition(np);
	if (!err)
		err = niu_init_link(np);

	return err;
}
static LIST_HEAD(niu_parent_list);
static DEFINE_MUTEX(niu_parent_lock);
static int niu_parent_index;
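/* sysfs attributes exported on the "niu" platform device that represents
 * the parent chip shared by all of its ports.
 */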
static ssize_t show_port_phy(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	u32 port_phy = p->port_phy;
	char *orig_buf = buf;
	int i;

	if (port_phy == PORT_PHY_UNKNOWN ||
	    port_phy == PORT_PHY_INVALID)
		return 0;

	for (i = 0; i < p->num_ports; i++) {
		const char *type_str;
		int type;

		type = phy_decode(port_phy, i);
		if (type == PORT_TYPE_10G)
			type_str = "10G";
		else
			type_str = "1G";
		buf += sprintf(buf,
			       (i == 0) ? "%s" : " %s",
			       type_str);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}
static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	const char *type_str;

	switch (p->plat_type) {
	case PLAT_TYPE_ATLAS:
		type_str = "atlas";
		break;
	case PLAT_TYPE_NIU:
		type_str = "niu";
		break;
	case PLAT_TYPE_VF_P0:
		type_str = "vf_p0";
		break;
	case PLAT_TYPE_VF_P1:
		type_str = "vf_p1";
		break;
	default:
		type_str = "unknown";
		break;
	}

	return sprintf(buf, "%s\n", type_str);
}
static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf,
				    int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	char *orig_buf = buf;
	u8 *arr;
	int i;

	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}
static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 0);
}
static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;

	return sprintf(buf, "%d\n", p->num_ports);
}
static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
	{}
};
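/* Create the shared parent object and its platform device the first time
 * any port of a given chip probes; TCAM and flow key defaults are set up
 * here as well.
 */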
static struct niu_parent * __devinit niu_new_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct platform_device *plat_dev;
	struct niu_parent *p;
	int i;

	niudbg(PROBE, "niu_new_parent: Creating new parent.\n");

	plat_dev = platform_device_register_simple("niu", niu_parent_index,
						   NULL, 0);
	if (IS_ERR(plat_dev))
		return NULL;

	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
		int err = device_create_file(&plat_dev->dev,
					     &niu_parent_attributes[i]);
		if (err)
			goto fail_unregister;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail_unregister;

	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
				      FLOW_KEY_IPDA |
				      FLOW_KEY_PROTO |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_0_SHIFT) |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_1_SHIFT));
	}

	for (i = 0; i < LDN_MAX + 1; i++)
		p->ldg_map[i] = LDG_INVALID;

	return p;

fail_unregister:
	platform_device_unregister(plat_dev);
	return NULL;
}
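/* Look up (or create) the parent for this chip and take a reference on
 * it; niu_put_parent() drops the reference and unregisters the platform
 * device once the last port goes away.
 */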
static struct niu_parent * __devinit niu_get_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
	       ptype, port);

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id, ptype);

	if (p) {
		char port_name[6];
		int err;

		sprintf(port_name, "port%d", port);
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
					&np->device->kobj,
					port_name);
		if (!err) {
			p->ports[port] = np;
			atomic_inc(&p->refcnt);
		}
	}
	mutex_unlock(&niu_parent_lock);

	return p;
}
static void niu_put_parent(struct niu *np)
{
	struct niu_parent *p = np->parent;
	u8 port = np->port;
	char port_name[6];

	BUG_ON(!p || p->ports[port] != np);

	niudbg(PROBE, "niu_put_parent: port[%u]\n", port);

	sprintf(port_name, "port%d", port);

	mutex_lock(&niu_parent_lock);

	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

	p->ports[port] = NULL;
	np->parent = NULL;

	if (atomic_dec_and_test(&p->refcnt)) {
		list_del(&p->list);
		platform_device_unregister(p->plat_dev);
	}

	mutex_unlock(&niu_parent_lock);
}
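/* DMA ops used when the device sits behind the PCI host bridge; these
 * simply wrap the generic DMA API.
 */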
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
				    u64 *handle, gfp_t flag)
{
	dma_addr_t dh;
	void *ret;

	ret = dma_alloc_coherent(dev, size, &dh, flag);
	if (ret)
		*handle = dh;
	return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
				  void *cpu_addr, u64 handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
			       size_t size, enum dma_data_direction direction)
{
	dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
			      size_t size,
			      enum dma_data_direction direction)
{
	return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
				 size_t size,
				 enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
	.alloc_coherent	= niu_pci_alloc_coherent,
	.free_coherent	= niu_pci_free_coherent,
	.map_page	= niu_pci_map_page,
	.unmap_page	= niu_pci_unmap_page,
	.map_single	= niu_pci_map_single,
	.unmap_single	= niu_pci_unmap_single,
};
static void __devinit niu_driver_version(void)
{
	static int niu_version_printed;

	if (niu_version_printed++ == 0)
		pr_info("%s", version);
}
static struct net_device * __devinit niu_alloc_and_init(
	struct device *gen_dev, struct pci_dev *pdev,
	struct of_device *op, const struct niu_ops *ops,
	u8 port)
{
	struct net_device *dev;
	struct niu *np;

	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
	if (!dev) {
		dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, gen_dev);

	np = netdev_priv(dev);
	np->dev = dev;
	np->pdev = pdev;
	np->op = op;
	np->device = gen_dev;
	np->ops = ops;

	np->msg_enable = niu_debug;

	spin_lock_init(&np->lock);
	INIT_WORK(&np->reset_task, niu_reset_task);

	np->port = port;

	return dev;
}
static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
	dev->open		= niu_open;
	dev->stop		= niu_close;
	dev->get_stats		= niu_get_stats;
	dev->set_multicast_list	= niu_set_rx_mode;
	dev->set_mac_address	= niu_set_mac_addr;
	dev->do_ioctl		= niu_ioctl;
	dev->tx_timeout		= niu_tx_timeout;
	dev->hard_start_xmit	= niu_start_xmit;
	dev->ethtool_ops	= &niu_ethtool_ops;
	dev->watchdog_timeo	= NIU_TX_TIMEOUT;
	dev->change_mtu		= niu_change_mtu;
}
static void __devinit niu_device_announce(struct niu *np)
{
	struct net_device *dev = np->dev;
	DECLARE_MAC_BUF(mac);

	pr_info("%s: NIU Ethernet %s\n",
		dev->name, print_mac(mac, dev->dev_addr));

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
}
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	unsigned long niureg_base, niureg_len;
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	int err, pos;
	u64 dma_mask;
	u16 val16;

	niu_driver_version();

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
			"base addresses, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_out_disable_pdev;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		err = -ENODEV;
		goto err_out_free_res;
	}

	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.pci.domain = pci_domain_nr(pdev->bus);
	parent_id.pci.bus = pdev->bus->number;
	parent_id.pci.device = PCI_SLOT(pdev->devfn);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_ATLAS);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	val16 |= (PCI_EXP_DEVCTL_CERE |
		  PCI_EXP_DEVCTL_NFERE |
		  PCI_EXP_DEVCTL_FERE |
		  PCI_EXP_DEVCTL_URRE |
		  PCI_EXP_DEVCTL_RELAX_EN);
	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);

	dma_mask = DMA_44BIT_MASK;
	err = pci_set_dma_mask(pdev, dma_mask);
	if (!err) {
		dev->features |= NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, dma_mask);
		if (err) {
			dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
				"DMA for consistent allocations, "
				"aborting.\n");
			goto err_out_release_parent;
		}
	}
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, PFX "No usable DMA configuration, "
				"aborting.\n");
			goto err_out_release_parent;
		}
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	niureg_base = pci_resource_start(pdev, 0);
	niureg_len = pci_resource_len(pdev, 0);

	np->regs = ioremap_nocache(niureg_base, niureg_len);
	if (!np->regs) {
		dev_err(&pdev->dev, PFX "Cannot map device registers, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	dev->irq = pdev->irq;

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev, PFX "Problem fetching invariants "
				"of chip, aborting.\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot register net device, "
			"aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->regs) {
		iounmap(np->regs);
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}
static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->regs) {
			iounmap(np->regs);
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);

		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	niu_netif_stop(np);

	del_timer_sync(&np->timer);

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_interrupts(np, 0);
	spin_unlock_irqrestore(&np->lock, flags);

	netif_device_detach(dev);

	spin_lock_irqsave(&np->lock, flags);
	niu_stop_hw(np);
	spin_unlock_irqrestore(&np->lock, flags);

	pci_save_state(pdev);

	return 0;
}
static int niu_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);

	netif_device_attach(dev);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}
static struct pci_driver niu_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= niu_pci_tbl,
	.probe		= niu_pci_init_one,
	.remove		= __devexit_p(niu_pci_remove_one),
	.suspend	= niu_suspend,
	.resume		= niu_resume,
};
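/* On the sparc64 on-chip NIU the device uses physical addresses directly,
 * so the "DMA" ops below just hand back __pa() / page_to_phys() values.
 */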
#ifdef CONFIG_SPARC64
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
				     u64 *dma_addr, gfp_t flag)
{
	unsigned long order = get_order(size);
	unsigned long page = __get_free_pages(flag, order);

	if (page == 0UL)
		return NULL;
	memset((char *)page, 0, PAGE_SIZE << order);
	*dma_addr = __pa(page);

	return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
				   void *cpu_addr, u64 handle)
{
	unsigned long order = get_order(size);

	free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
				size_t size, enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
			       size_t size,
			       enum dma_data_direction direction)
{
	return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static const struct niu_ops niu_phys_ops = {
	.alloc_coherent	= niu_phys_alloc_coherent,
	.free_coherent	= niu_phys_free_coherent,
	.map_page	= niu_phys_map_page,
	.unmap_page	= niu_phys_unmap_page,
	.map_single	= niu_phys_map_single,
	.unmap_single	= niu_phys_unmap_single,
};
static unsigned long res_size(struct resource *r)
{
	return r->end - r->start + 1UL;
}
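/* OF probe path for the on-chip NIU: registers come from the device's
 * "resource" entries via of_ioremap() rather than from PCI BARs.
 */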
static int __devinit niu_of_probe(struct of_device *op,
				  const struct of_device_id *match)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	const u32 *reg;
	int err;

	niu_driver_version();

	reg = of_get_property(op->node, "reg", NULL);
	if (!reg) {
		dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
			op->node->full_name);
		return -ENODEV;
	}

	dev = niu_alloc_and_init(&op->dev, NULL, op,
				 &niu_phys_ops, reg[0] & 0x1);
	if (!dev)
		return -ENOMEM;
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.of = of_get_parent(op->node);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_NIU);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	np->regs = of_ioremap(&op->resource[1], 0,
			      res_size(&op->resource[1]),
			      "niu regs");
	if (!np->regs) {
		dev_err(&op->dev, PFX "Cannot map device registers, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
				    res_size(&op->resource[2]),
				    "niu vregs-1");
	if (!np->vir_regs_1) {
		dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
				    res_size(&op->resource[3]),
				    "niu vregs-2");
	if (!np->vir_regs_2) {
		dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&op->dev, PFX "Problem fetching invariants "
				"of chip, aborting.\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&op->dev, PFX "Cannot register net device, "
			"aborting.\n");
		goto err_out_iounmap;
	}

	dev_set_drvdata(&op->dev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->vir_regs_1) {
		of_iounmap(&op->resource[2], np->vir_regs_1,
			   res_size(&op->resource[2]));
		np->vir_regs_1 = NULL;
	}

	if (np->vir_regs_2) {
		of_iounmap(&op->resource[3], np->vir_regs_2,
			   res_size(&op->resource[3]));
		np->vir_regs_2 = NULL;
	}

	if (np->regs) {
		of_iounmap(&op->resource[1], np->regs,
			   res_size(&op->resource[1]));
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

	return err;
}
static int __devexit niu_of_remove(struct of_device *op)
{
	struct net_device *dev = dev_get_drvdata(&op->dev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->vir_regs_1) {
			of_iounmap(&op->resource[2], np->vir_regs_1,
				   res_size(&op->resource[2]));
			np->vir_regs_1 = NULL;
		}

		if (np->vir_regs_2) {
			of_iounmap(&op->resource[3], np->vir_regs_2,
				   res_size(&op->resource[3]));
			np->vir_regs_2 = NULL;
		}

		if (np->regs) {
			of_iounmap(&op->resource[1], np->regs,
				   res_size(&op->resource[1]));
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);

		dev_set_drvdata(&op->dev, NULL);
	}
	return 0;
}
static struct of_device_id niu_match[] = {
	{
		.compatible = "SUNW,niusl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct of_platform_driver niu_of_driver = {
	.match_table	= niu_match,
	.probe		= niu_of_probe,
	.remove		= __devexit_p(niu_of_remove),
};
#endif /* CONFIG_SPARC64 */
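/* Module init: register the OF driver on sparc64 first, then the PCI
 * driver; unwind the OF registration if PCI registration fails.
 */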
static int __init niu_init(void)
{
	int err = 0;

	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = of_register_driver(&niu_of_driver, &of_bus_type);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			of_unregister_driver(&niu_of_driver);
#endif
	}

	return err;
}

static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	of_unregister_driver(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);