drivers/net/niu.c
1 /* niu.c: Neptune ethernet driver.
3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4 */
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/netdevice.h>
11 #include <linux/ethtool.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <linux/delay.h>
15 #include <linux/bitops.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/if_vlan.h>
19 #include <linux/ip.h>
20 #include <linux/in.h>
21 #include <linux/ipv6.h>
22 #include <linux/log2.h>
23 #include <linux/jiffies.h>
24 #include <linux/crc32.h>
26 #include <linux/io.h>
28 #ifdef CONFIG_SPARC64
29 #include <linux/of_device.h>
30 #endif
32 #include "niu.h"
34 #define DRV_MODULE_NAME "niu"
35 #define PFX DRV_MODULE_NAME ": "
36 #define DRV_MODULE_VERSION "1.0"
37 #define DRV_MODULE_RELDATE "Nov 14, 2008"
39 static char version[] __devinitdata =
40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
42 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
43 MODULE_DESCRIPTION("NIU ethernet driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRV_MODULE_VERSION);
47 #ifndef DMA_44BIT_MASK
48 #define DMA_44BIT_MASK 0x00000fffffffffffULL
49 #endif
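/* Fallbacks for platforms without native 64-bit MMIO accessors: NIU
 * registers are 64 bits wide, so compose each access from two 32-bit
 * readl()/writel() operations (low word first, high word at +4).
 */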
51 #ifndef readq
52 static u64 readq(void __iomem *reg)
54 return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
57 static void writeq(u64 val, void __iomem *reg)
59 writel(val & 0xffffffff, reg);
60 writel(val >> 32, reg + 0x4UL);
62 #endif
64 static struct pci_device_id niu_pci_tbl[] = {
65 {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
69 MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
71 #define NIU_TX_TIMEOUT (5 * HZ)
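/* Register access helpers; they expect a 'struct niu *np' in scope.
 * The _mac variants use the separately mapped MAC register area, while
 * the _ipp/_pcs/_xpcs variants add their per-block offsets to the main
 * register base.
 */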
73 #define nr64(reg) readq(np->regs + (reg))
74 #define nw64(reg, val) writeq((val), np->regs + (reg))
76 #define nr64_mac(reg) readq(np->mac_regs + (reg))
77 #define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg))
79 #define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg))
80 #define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg))
82 #define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg))
83 #define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg))
85 #define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg))
86 #define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg))
88 #define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
90 static int niu_debug;
91 static int debug = -1;
92 module_param(debug, int, 0);
93 MODULE_PARM_DESC(debug, "NIU debug level");
95 #define niudbg(TYPE, f, a...) \
96 do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
97 printk(KERN_DEBUG PFX f, ## a); \
98 } while (0)
100 #define niuinfo(TYPE, f, a...) \
101 do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
102 printk(KERN_INFO PFX f, ## a); \
103 } while (0)
105 #define niuwarn(TYPE, f, a...) \
106 do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
107 printk(KERN_WARNING PFX f, ## a); \
108 } while (0)
110 #define niu_lock_parent(np, flags) \
111 spin_lock_irqsave(&np->parent->lock, flags)
112 #define niu_unlock_parent(np, flags) \
113 spin_unlock_irqrestore(&np->parent->lock, flags)
115 static int serdes_init_10g_serdes(struct niu *np);
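/* Poll a register until the given bits clear, waiting 'delay' microseconds
 * between reads; return -ENODEV once 'limit' polls are exhausted.  The
 * set-and-wait variants first write the bits and then wait for the
 * hardware to clear them.
 */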
117 static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
118 u64 bits, int limit, int delay)
120 while (--limit >= 0) {
121 u64 val = nr64_mac(reg);
123 if (!(val & bits))
124 break;
125 udelay(delay);
127 if (limit < 0)
128 return -ENODEV;
129 return 0;
132 static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
133 u64 bits, int limit, int delay,
134 const char *reg_name)
136 int err;
138 nw64_mac(reg, bits);
139 err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
140 if (err)
141 dev_err(np->device, PFX "%s: bits (%llx) of register %s "
142 "would not clear, val[%llx]\n",
143 np->dev->name, (unsigned long long) bits, reg_name,
144 (unsigned long long) nr64_mac(reg));
145 return err;
148 #define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
149 ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
150 __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
153 static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
154 u64 bits, int limit, int delay)
156 while (--limit >= 0) {
157 u64 val = nr64_ipp(reg);
159 if (!(val & bits))
160 break;
161 udelay(delay);
163 if (limit < 0)
164 return -ENODEV;
165 return 0;
168 static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
169 u64 bits, int limit, int delay,
170 const char *reg_name)
172 int err;
173 u64 val;
175 val = nr64_ipp(reg);
176 val |= bits;
177 nw64_ipp(reg, val);
179 err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
180 if (err)
181 dev_err(np->device, PFX "%s: bits (%llx) of register %s "
182 "would not clear, val[%llx]\n",
183 np->dev->name, (unsigned long long) bits, reg_name,
184 (unsigned long long) nr64_ipp(reg));
185 return err;
188 #define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
189 ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
190 __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
193 static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
194 u64 bits, int limit, int delay)
196 while (--limit >= 0) {
197 u64 val = nr64(reg);
199 if (!(val & bits))
200 break;
201 udelay(delay);
203 if (limit < 0)
204 return -ENODEV;
205 return 0;
208 #define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
209 ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
210 __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
213 static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
214 u64 bits, int limit, int delay,
215 const char *reg_name)
217 int err;
219 nw64(reg, bits);
220 err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
221 if (err)
222 dev_err(np->device, PFX "%s: bits (%llx) of register %s "
223 "would not clear, val[%llx]\n",
224 np->dev->name, (unsigned long long) bits, reg_name,
225 (unsigned long long) nr64(reg));
226 return err;
229 #define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
230 ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
231 __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
234 static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
236 u64 val = (u64) lp->timer;
238 if (on)
239 val |= LDG_IMGMT_ARM;
241 nw64(LDG_IMGMT(lp->ldg_num), val);
244 static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
246 unsigned long mask_reg, bits;
247 u64 val;
249 if (ldn < 0 || ldn > LDN_MAX)
250 return -EINVAL;
252 if (ldn < 64) {
253 mask_reg = LD_IM0(ldn);
254 bits = LD_IM0_MASK;
255 } else {
256 mask_reg = LD_IM1(ldn - 64);
257 bits = LD_IM1_MASK;
260 val = nr64(mask_reg);
261 if (on)
262 val &= ~bits;
263 else
264 val |= bits;
265 nw64(mask_reg, val);
267 return 0;
270 static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
272 struct niu_parent *parent = np->parent;
273 int i;
275 for (i = 0; i <= LDN_MAX; i++) {
276 int err;
278 if (parent->ldg_map[i] != lp->ldg_num)
279 continue;
281 err = niu_ldn_irq_enable(np, i, on);
282 if (err)
283 return err;
285 return 0;
288 static int niu_enable_interrupts(struct niu *np, int on)
290 int i;
292 for (i = 0; i < np->num_ldg; i++) {
293 struct niu_ldg *lp = &np->ldg[i];
294 int err;
296 err = niu_enable_ldn_in_ldg(np, lp, on);
297 if (err)
298 return err;
300 for (i = 0; i < np->num_ldg; i++)
301 niu_ldg_rearm(np, &np->ldg[i], on);
303 return 0;
306 static u32 phy_encode(u32 type, int port)
308 return (type << (port * 2));
311 static u32 phy_decode(u32 val, int port)
313 return (val >> (port * 2)) & PORT_TYPE_MASK;
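/* MDIO/MII transactions go through the MIF frame register; mdio_wait()
 * polls for the turnaround bit and returns the 16-bit read data, or
 * -ENODEV on timeout.
 */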
316 static int mdio_wait(struct niu *np)
318 int limit = 1000;
319 u64 val;
321 while (--limit > 0) {
322 val = nr64(MIF_FRAME_OUTPUT);
323 if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
324 return val & MIF_FRAME_OUTPUT_DATA;
326 udelay(10);
329 return -ENODEV;
332 static int mdio_read(struct niu *np, int port, int dev, int reg)
334 int err;
336 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
337 err = mdio_wait(np);
338 if (err < 0)
339 return err;
341 nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
342 return mdio_wait(np);
345 static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
347 int err;
349 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
350 err = mdio_wait(np);
351 if (err < 0)
352 return err;
354 nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
355 err = mdio_wait(np);
356 if (err < 0)
357 return err;
359 return 0;
362 static int mii_read(struct niu *np, int port, int reg)
364 nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
365 return mdio_wait(np);
368 static int mii_write(struct niu *np, int port, int reg, int data)
370 int err;
372 nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
373 err = mdio_wait(np);
374 if (err < 0)
375 return err;
377 return 0;
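/* The ESR2 (TI PLL) SERDES configuration registers are written 16 bits
 * at a time over MDIO, hence the separate _L and _H halves below.
 */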
380 static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
382 int err;
384 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
385 ESR2_TI_PLL_TX_CFG_L(channel),
386 val & 0xffff);
387 if (!err)
388 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
389 ESR2_TI_PLL_TX_CFG_H(channel),
390 val >> 16);
391 return err;
394 static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
396 int err;
398 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
399 ESR2_TI_PLL_RX_CFG_L(channel),
400 val & 0xffff);
401 if (!err)
402 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
403 ESR2_TI_PLL_RX_CFG_H(channel),
404 val >> 16);
405 return err;
408 /* Mode is always 10G fiber. */
409 static int serdes_init_niu_10g_fiber(struct niu *np)
411 struct niu_link_config *lp = &np->link_config;
412 u32 tx_cfg, rx_cfg;
413 unsigned long i;
415 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
416 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
417 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
418 PLL_RX_CFG_EQ_LP_ADAPTIVE);
420 if (lp->loopback_mode == LOOPBACK_PHY) {
421 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
423 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
424 ESR2_TI_PLL_TEST_CFG_L, test_cfg);
426 tx_cfg |= PLL_TX_CFG_ENTEST;
427 rx_cfg |= PLL_RX_CFG_ENTEST;
430 /* Initialize all 4 lanes of the SERDES. */
431 for (i = 0; i < 4; i++) {
432 int err = esr2_set_tx_cfg(np, i, tx_cfg);
433 if (err)
434 return err;
437 for (i = 0; i < 4; i++) {
438 int err = esr2_set_rx_cfg(np, i, rx_cfg);
439 if (err)
440 return err;
443 return 0;
446 static int serdes_init_niu_1g_serdes(struct niu *np)
448 struct niu_link_config *lp = &np->link_config;
449 u16 pll_cfg, pll_sts;
450 int max_retry = 100;
451 u64 sig, mask, val;
452 u32 tx_cfg, rx_cfg;
453 unsigned long i;
454 int err;
456 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
457 PLL_TX_CFG_RATE_HALF);
458 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
459 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
460 PLL_RX_CFG_RATE_HALF);
462 if (np->port == 0)
463 rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
465 if (lp->loopback_mode == LOOPBACK_PHY) {
466 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
468 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
469 ESR2_TI_PLL_TEST_CFG_L, test_cfg);
471 tx_cfg |= PLL_TX_CFG_ENTEST;
472 rx_cfg |= PLL_RX_CFG_ENTEST;
475 /* Initialize PLL for 1G */
476 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
478 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
479 ESR2_TI_PLL_CFG_L, pll_cfg);
480 if (err) {
481 dev_err(np->device, PFX "NIU Port %d "
482 "serdes_init_niu_1g_serdes: "
483 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
484 return err;
487 pll_sts = PLL_CFG_ENPLL;
489 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
490 ESR2_TI_PLL_STS_L, pll_sts);
491 if (err) {
492 dev_err(np->device, PFX "NIU Port %d "
493 "serdes_init_niu_1g_serdes: "
494 "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
495 return err;
498 udelay(200);
500 /* Initialize all 4 lanes of the SERDES. */
501 for (i = 0; i < 4; i++) {
502 err = esr2_set_tx_cfg(np, i, tx_cfg);
503 if (err)
504 return err;
507 for (i = 0; i < 4; i++) {
508 err = esr2_set_rx_cfg(np, i, rx_cfg);
509 if (err)
510 return err;
513 switch (np->port) {
514 case 0:
515 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
516 mask = val;
517 break;
519 case 1:
520 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
521 mask = val;
522 break;
524 default:
525 return -EINVAL;
528 while (max_retry--) {
529 sig = nr64(ESR_INT_SIGNALS);
530 if ((sig & mask) == val)
531 break;
533 mdelay(500);
536 if ((sig & mask) != val) {
537 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
538 "[%08x]\n", np->port, (int) (sig & mask), (int) val);
539 return -ENODEV;
542 return 0;
545 static int serdes_init_niu_10g_serdes(struct niu *np)
547 struct niu_link_config *lp = &np->link_config;
548 u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
549 int max_retry = 100;
550 u64 sig, mask, val;
551 unsigned long i;
552 int err;
554 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
555 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
556 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
557 PLL_RX_CFG_EQ_LP_ADAPTIVE);
559 if (lp->loopback_mode == LOOPBACK_PHY) {
560 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
562 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
563 ESR2_TI_PLL_TEST_CFG_L, test_cfg);
565 tx_cfg |= PLL_TX_CFG_ENTEST;
566 rx_cfg |= PLL_RX_CFG_ENTEST;
569 /* Initialize PLL for 10G */
570 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
572 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
573 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
574 if (err) {
575 dev_err(np->device, PFX "NIU Port %d "
576 "serdes_init_niu_10g_serdes: "
577 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
578 return err;
581 pll_sts = PLL_CFG_ENPLL;
583 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
584 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
585 if (err) {
586 dev_err(np->device, PFX "NIU Port %d "
587 "serdes_init_niu_10g_serdes: "
588 "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
589 return err;
592 udelay(200);
594 /* Initialize all 4 lanes of the SERDES. */
595 for (i = 0; i < 4; i++) {
596 err = esr2_set_tx_cfg(np, i, tx_cfg);
597 if (err)
598 return err;
601 for (i = 0; i < 4; i++) {
602 err = esr2_set_rx_cfg(np, i, rx_cfg);
603 if (err)
604 return err;
607 /* check if serdes is ready */
609 switch (np->port) {
610 case 0:
611 mask = ESR_INT_SIGNALS_P0_BITS;
612 val = (ESR_INT_SRDY0_P0 |
613 ESR_INT_DET0_P0 |
614 ESR_INT_XSRDY_P0 |
615 ESR_INT_XDP_P0_CH3 |
616 ESR_INT_XDP_P0_CH2 |
617 ESR_INT_XDP_P0_CH1 |
618 ESR_INT_XDP_P0_CH0);
619 break;
621 case 1:
622 mask = ESR_INT_SIGNALS_P1_BITS;
623 val = (ESR_INT_SRDY0_P1 |
624 ESR_INT_DET0_P1 |
625 ESR_INT_XSRDY_P1 |
626 ESR_INT_XDP_P1_CH3 |
627 ESR_INT_XDP_P1_CH2 |
628 ESR_INT_XDP_P1_CH1 |
629 ESR_INT_XDP_P1_CH0);
630 break;
632 default:
633 return -EINVAL;
636 while (max_retry--) {
637 sig = nr64(ESR_INT_SIGNALS);
638 if ((sig & mask) == val)
639 break;
641 mdelay(500);
644 if ((sig & mask) != val) {
645 pr_info(PFX "NIU Port %u signal bits [%08x] are not "
646 "[%08x] for 10G...trying 1G\n",
647 np->port, (int) (sig & mask), (int) val);
649 /* 10G failed, try initializing at 1G */
650 err = serdes_init_niu_1g_serdes(np);
651 if (!err) {
652 np->flags &= ~NIU_FLAGS_10G;
653 np->mac_xcvr = MAC_XCVR_PCS;
654 } else {
655 dev_err(np->device, PFX "Port %u 10G/1G SERDES "
656 "Link Failed \n", np->port);
657 return -ENODEV;
660 return 0;
663 static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
665 int err;
667 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
668 if (err >= 0) {
669 *val = (err & 0xffff);
670 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
671 ESR_RXTX_CTRL_H(chan));
672 if (err >= 0)
673 *val |= ((err & 0xffff) << 16);
674 err = 0;
676 return err;
679 static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
681 int err;
683 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
684 ESR_GLUE_CTRL0_L(chan));
685 if (err >= 0) {
686 *val = (err & 0xffff);
687 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
688 ESR_GLUE_CTRL0_H(chan));
689 if (err >= 0) {
690 *val |= ((err & 0xffff) << 16);
691 err = 0;
694 return err;
697 static int esr_read_reset(struct niu *np, u32 *val)
699 int err;
701 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
702 ESR_RXTX_RESET_CTRL_L);
703 if (err >= 0) {
704 *val = (err & 0xffff);
705 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
706 ESR_RXTX_RESET_CTRL_H);
707 if (err >= 0) {
708 *val |= ((err & 0xffff) << 16);
709 err = 0;
712 return err;
715 static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
717 int err;
719 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
720 ESR_RXTX_CTRL_L(chan), val & 0xffff);
721 if (!err)
722 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
723 ESR_RXTX_CTRL_H(chan), (val >> 16));
724 return err;
727 static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
729 int err;
731 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
732 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
733 if (!err)
734 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
735 ESR_GLUE_CTRL0_H(chan), (val >> 16));
736 return err;
739 static int esr_reset(struct niu *np)
741 u32 reset;
742 int err;
744 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
745 ESR_RXTX_RESET_CTRL_L, 0x0000);
746 if (err)
747 return err;
748 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
749 ESR_RXTX_RESET_CTRL_H, 0xffff);
750 if (err)
751 return err;
752 udelay(200);
754 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
755 ESR_RXTX_RESET_CTRL_L, 0xffff);
756 if (err)
757 return err;
758 udelay(200);
760 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
761 ESR_RXTX_RESET_CTRL_H, 0x0000);
762 if (err)
763 return err;
764 udelay(200);
766 err = esr_read_reset(np, &reset);
767 if (err)
768 return err;
769 if (reset != 0) {
770 dev_err(np->device, PFX "Port %u ESR_RESET "
771 "did not clear [%08x]\n",
772 np->port, reset);
773 return -ENODEV;
776 return 0;
779 static int serdes_init_10g(struct niu *np)
781 struct niu_link_config *lp = &np->link_config;
782 unsigned long ctrl_reg, test_cfg_reg, i;
783 u64 ctrl_val, test_cfg_val, sig, mask, val;
784 int err;
786 switch (np->port) {
787 case 0:
788 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
789 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
790 break;
791 case 1:
792 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
793 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
794 break;
796 default:
797 return -EINVAL;
799 ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
800 ENET_SERDES_CTRL_SDET_1 |
801 ENET_SERDES_CTRL_SDET_2 |
802 ENET_SERDES_CTRL_SDET_3 |
803 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
804 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
805 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
806 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
807 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
808 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
809 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
810 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
811 test_cfg_val = 0;
813 if (lp->loopback_mode == LOOPBACK_PHY) {
814 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
815 ENET_SERDES_TEST_MD_0_SHIFT) |
816 (ENET_TEST_MD_PAD_LOOPBACK <<
817 ENET_SERDES_TEST_MD_1_SHIFT) |
818 (ENET_TEST_MD_PAD_LOOPBACK <<
819 ENET_SERDES_TEST_MD_2_SHIFT) |
820 (ENET_TEST_MD_PAD_LOOPBACK <<
821 ENET_SERDES_TEST_MD_3_SHIFT));
824 nw64(ctrl_reg, ctrl_val);
825 nw64(test_cfg_reg, test_cfg_val);
827 /* Initialize all 4 lanes of the SERDES. */
828 for (i = 0; i < 4; i++) {
829 u32 rxtx_ctrl, glue0;
831 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
832 if (err)
833 return err;
834 err = esr_read_glue0(np, i, &glue0);
835 if (err)
836 return err;
838 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
839 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
840 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
842 glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
843 ESR_GLUE_CTRL0_THCNT |
844 ESR_GLUE_CTRL0_BLTIME);
845 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
846 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
847 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
848 (BLTIME_300_CYCLES <<
849 ESR_GLUE_CTRL0_BLTIME_SHIFT));
851 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
852 if (err)
853 return err;
854 err = esr_write_glue0(np, i, glue0);
855 if (err)
856 return err;
859 err = esr_reset(np);
860 if (err)
861 return err;
863 sig = nr64(ESR_INT_SIGNALS);
864 switch (np->port) {
865 case 0:
866 mask = ESR_INT_SIGNALS_P0_BITS;
867 val = (ESR_INT_SRDY0_P0 |
868 ESR_INT_DET0_P0 |
869 ESR_INT_XSRDY_P0 |
870 ESR_INT_XDP_P0_CH3 |
871 ESR_INT_XDP_P0_CH2 |
872 ESR_INT_XDP_P0_CH1 |
873 ESR_INT_XDP_P0_CH0);
874 break;
876 case 1:
877 mask = ESR_INT_SIGNALS_P1_BITS;
878 val = (ESR_INT_SRDY0_P1 |
879 ESR_INT_DET0_P1 |
880 ESR_INT_XSRDY_P1 |
881 ESR_INT_XDP_P1_CH3 |
882 ESR_INT_XDP_P1_CH2 |
883 ESR_INT_XDP_P1_CH1 |
884 ESR_INT_XDP_P1_CH0);
885 break;
887 default:
888 return -EINVAL;
891 if ((sig & mask) != val) {
892 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
893 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
894 return 0;
896 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
897 "[%08x]\n", np->port, (int) (sig & mask), (int) val);
898 return -ENODEV;
900 if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
901 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
902 return 0;
905 static int serdes_init_1g(struct niu *np)
907 u64 val;
909 val = nr64(ENET_SERDES_1_PLL_CFG);
910 val &= ~ENET_SERDES_PLL_FBDIV2;
911 switch (np->port) {
912 case 0:
913 val |= ENET_SERDES_PLL_HRATE0;
914 break;
915 case 1:
916 val |= ENET_SERDES_PLL_HRATE1;
917 break;
918 case 2:
919 val |= ENET_SERDES_PLL_HRATE2;
920 break;
921 case 3:
922 val |= ENET_SERDES_PLL_HRATE3;
923 break;
924 default:
925 return -EINVAL;
927 nw64(ENET_SERDES_1_PLL_CFG, val);
929 return 0;
932 static int serdes_init_1g_serdes(struct niu *np)
934 struct niu_link_config *lp = &np->link_config;
935 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
936 u64 ctrl_val, test_cfg_val, sig, mask, val;
937 int err;
938 u64 reset_val, val_rd;
940 val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
941 ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
942 ENET_SERDES_PLL_FBDIV0;
943 switch (np->port) {
944 case 0:
945 reset_val = ENET_SERDES_RESET_0;
946 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
947 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
948 pll_cfg = ENET_SERDES_0_PLL_CFG;
949 break;
950 case 1:
951 reset_val = ENET_SERDES_RESET_1;
952 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
953 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
954 pll_cfg = ENET_SERDES_1_PLL_CFG;
955 break;
957 default:
958 return -EINVAL;
960 ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
961 ENET_SERDES_CTRL_SDET_1 |
962 ENET_SERDES_CTRL_SDET_2 |
963 ENET_SERDES_CTRL_SDET_3 |
964 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
965 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
966 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
967 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
968 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
969 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
970 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
971 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
972 test_cfg_val = 0;
974 if (lp->loopback_mode == LOOPBACK_PHY) {
975 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
976 ENET_SERDES_TEST_MD_0_SHIFT) |
977 (ENET_TEST_MD_PAD_LOOPBACK <<
978 ENET_SERDES_TEST_MD_1_SHIFT) |
979 (ENET_TEST_MD_PAD_LOOPBACK <<
980 ENET_SERDES_TEST_MD_2_SHIFT) |
981 (ENET_TEST_MD_PAD_LOOPBACK <<
982 ENET_SERDES_TEST_MD_3_SHIFT));
985 nw64(ENET_SERDES_RESET, reset_val);
986 mdelay(20);
987 val_rd = nr64(ENET_SERDES_RESET);
988 val_rd &= ~reset_val;
989 nw64(pll_cfg, val);
990 nw64(ctrl_reg, ctrl_val);
991 nw64(test_cfg_reg, test_cfg_val);
992 nw64(ENET_SERDES_RESET, val_rd);
993 mdelay(2000);
995 /* Initialize all 4 lanes of the SERDES. */
996 for (i = 0; i < 4; i++) {
997 u32 rxtx_ctrl, glue0;
999 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
1000 if (err)
1001 return err;
1002 err = esr_read_glue0(np, i, &glue0);
1003 if (err)
1004 return err;
1006 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
1007 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
1008 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
1010 glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
1011 ESR_GLUE_CTRL0_THCNT |
1012 ESR_GLUE_CTRL0_BLTIME);
1013 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
1014 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
1015 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
1016 (BLTIME_300_CYCLES <<
1017 ESR_GLUE_CTRL0_BLTIME_SHIFT));
1019 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
1020 if (err)
1021 return err;
1022 err = esr_write_glue0(np, i, glue0);
1023 if (err)
1024 return err;
1028 sig = nr64(ESR_INT_SIGNALS);
1029 switch (np->port) {
1030 case 0:
1031 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
1032 mask = val;
1033 break;
1035 case 1:
1036 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
1037 mask = val;
1038 break;
1040 default:
1041 return -EINVAL;
1044 if ((sig & mask) != val) {
1045 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
1046 "[%08x]\n", np->port, (int) (sig & mask), (int) val);
1047 return -ENODEV;
1050 return 0;
1053 static int link_status_1g_serdes(struct niu *np, int *link_up_p)
1055 struct niu_link_config *lp = &np->link_config;
1056 int link_up;
1057 u64 val;
1058 u16 current_speed;
1059 unsigned long flags;
1060 u8 current_duplex;
1062 link_up = 0;
1063 current_speed = SPEED_INVALID;
1064 current_duplex = DUPLEX_INVALID;
1066 spin_lock_irqsave(&np->lock, flags);
1068 val = nr64_pcs(PCS_MII_STAT);
1070 if (val & PCS_MII_STAT_LINK_STATUS) {
1071 link_up = 1;
1072 current_speed = SPEED_1000;
1073 current_duplex = DUPLEX_FULL;
1076 lp->active_speed = current_speed;
1077 lp->active_duplex = current_duplex;
1078 spin_unlock_irqrestore(&np->lock, flags);
1080 *link_up_p = link_up;
1081 return 0;
1084 static int link_status_10g_serdes(struct niu *np, int *link_up_p)
1086 unsigned long flags;
1087 struct niu_link_config *lp = &np->link_config;
1088 int link_up = 0;
1089 int link_ok = 1;
1090 u64 val, val2;
1091 u16 current_speed;
1092 u8 current_duplex;
1094 if (!(np->flags & NIU_FLAGS_10G))
1095 return link_status_1g_serdes(np, link_up_p);
1097 current_speed = SPEED_INVALID;
1098 current_duplex = DUPLEX_INVALID;
1099 spin_lock_irqsave(&np->lock, flags);
1101 val = nr64_xpcs(XPCS_STATUS(0));
1102 val2 = nr64_mac(XMAC_INTER2);
1103 if (val2 & 0x01000000)
1104 link_ok = 0;
1106 if ((val & 0x1000ULL) && link_ok) {
1107 link_up = 1;
1108 current_speed = SPEED_10000;
1109 current_duplex = DUPLEX_FULL;
1111 lp->active_speed = current_speed;
1112 lp->active_duplex = current_duplex;
1113 spin_unlock_irqrestore(&np->lock, flags);
1114 *link_up_p = link_up;
1115 return 0;
1118 static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
1120 struct niu_link_config *lp = &np->link_config;
1121 u16 current_speed, bmsr;
1122 unsigned long flags;
1123 u8 current_duplex;
1124 int err, link_up;
1126 link_up = 0;
1127 current_speed = SPEED_INVALID;
1128 current_duplex = DUPLEX_INVALID;
1130 spin_lock_irqsave(&np->lock, flags);
1132 err = -EINVAL;
1134 err = mii_read(np, np->phy_addr, MII_BMSR);
1135 if (err < 0)
1136 goto out;
1138 bmsr = err;
1139 if (bmsr & BMSR_LSTATUS) {
1140 u16 adv, lpa, common, estat;
1142 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1143 if (err < 0)
1144 goto out;
1145 adv = err;
1147 err = mii_read(np, np->phy_addr, MII_LPA);
1148 if (err < 0)
1149 goto out;
1150 lpa = err;
1152 common = adv & lpa;
1154 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1155 if (err < 0)
1156 goto out;
1157 estat = err;
1158 link_up = 1;
1159 current_speed = SPEED_1000;
1160 current_duplex = DUPLEX_FULL;
1163 lp->active_speed = current_speed;
1164 lp->active_duplex = current_duplex;
1165 err = 0;
1167 out:
1168 spin_unlock_irqrestore(&np->lock, flags);
1170 *link_up_p = link_up;
1171 return err;
1174 static int bcm8704_reset(struct niu *np)
1176 int err, limit;
1178 err = mdio_read(np, np->phy_addr,
1179 BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1180 if (err < 0)
1181 return err;
1182 err |= BMCR_RESET;
1183 err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1184 MII_BMCR, err);
1185 if (err)
1186 return err;
1188 limit = 1000;
1189 while (--limit >= 0) {
1190 err = mdio_read(np, np->phy_addr,
1191 BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1192 if (err < 0)
1193 return err;
1194 if (!(err & BMCR_RESET))
1195 break;
1197 if (limit < 0) {
1198 dev_err(np->device, PFX "Port %u PHY will not reset "
1199 "(bmcr=%04x)\n", np->port, (err & 0xffff));
1200 return -ENODEV;
1202 return 0;
1205 /* When written, certain PHY registers need to be read back twice
1206 * in order for the bits to settle properly.
1208 static int bcm8704_user_dev3_readback(struct niu *np, int reg)
1210 int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1211 if (err < 0)
1212 return err;
1213 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1214 if (err < 0)
1215 return err;
1216 return 0;
1219 static int bcm8706_init_user_dev3(struct niu *np)
1221 int err;
1224 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1225 BCM8704_USER_OPT_DIGITAL_CTRL);
1226 if (err < 0)
1227 return err;
1228 err &= ~USER_ODIG_CTRL_GPIOS;
1229 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1230 err |= USER_ODIG_CTRL_RESV2;
1231 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1232 BCM8704_USER_OPT_DIGITAL_CTRL, err);
1233 if (err)
1234 return err;
1236 mdelay(1000);
1238 return 0;
1241 static int bcm8704_init_user_dev3(struct niu *np)
1243 int err;
1245 err = mdio_write(np, np->phy_addr,
1246 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
1247 (USER_CONTROL_OPTXRST_LVL |
1248 USER_CONTROL_OPBIASFLT_LVL |
1249 USER_CONTROL_OBTMPFLT_LVL |
1250 USER_CONTROL_OPPRFLT_LVL |
1251 USER_CONTROL_OPTXFLT_LVL |
1252 USER_CONTROL_OPRXLOS_LVL |
1253 USER_CONTROL_OPRXFLT_LVL |
1254 USER_CONTROL_OPTXON_LVL |
1255 (0x3f << USER_CONTROL_RES1_SHIFT)));
1256 if (err)
1257 return err;
1259 err = mdio_write(np, np->phy_addr,
1260 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
1261 (USER_PMD_TX_CTL_XFP_CLKEN |
1262 (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
1263 (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
1264 USER_PMD_TX_CTL_TSCK_LPWREN));
1265 if (err)
1266 return err;
1268 err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
1269 if (err)
1270 return err;
1271 err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
1272 if (err)
1273 return err;
1275 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1276 BCM8704_USER_OPT_DIGITAL_CTRL);
1277 if (err < 0)
1278 return err;
1279 err &= ~USER_ODIG_CTRL_GPIOS;
1280 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1281 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1282 BCM8704_USER_OPT_DIGITAL_CTRL, err);
1283 if (err)
1284 return err;
1286 mdelay(1000);
1288 return 0;
1291 static int mrvl88x2011_act_led(struct niu *np, int val)
1293 int err;
1295 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1296 MRVL88X2011_LED_8_TO_11_CTL);
1297 if (err < 0)
1298 return err;
1300 err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
1301 err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
1303 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1304 MRVL88X2011_LED_8_TO_11_CTL, err);
1307 static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
1309 int err;
1311 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1312 MRVL88X2011_LED_BLINK_CTL);
1313 if (err >= 0) {
1314 err &= ~MRVL88X2011_LED_BLKRATE_MASK;
1315 err |= (rate << 4);
1317 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1318 MRVL88X2011_LED_BLINK_CTL, err);
1321 return err;
1324 static int xcvr_init_10g_mrvl88x2011(struct niu *np)
1326 int err;
1328 /* Set LED functions */
1329 err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
1330 if (err)
1331 return err;
1333 /* led activity */
1334 err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
1335 if (err)
1336 return err;
1338 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1339 MRVL88X2011_GENERAL_CTL);
1340 if (err < 0)
1341 return err;
1343 err |= MRVL88X2011_ENA_XFPREFCLK;
1345 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1346 MRVL88X2011_GENERAL_CTL, err);
1347 if (err < 0)
1348 return err;
1350 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1351 MRVL88X2011_PMA_PMD_CTL_1);
1352 if (err < 0)
1353 return err;
1355 if (np->link_config.loopback_mode == LOOPBACK_MAC)
1356 err |= MRVL88X2011_LOOPBACK;
1357 else
1358 err &= ~MRVL88X2011_LOOPBACK;
1360 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1361 MRVL88X2011_PMA_PMD_CTL_1, err);
1362 if (err < 0)
1363 return err;
1365 /* Enable PMD */
1366 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1367 MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
1371 static int xcvr_diag_bcm870x(struct niu *np)
1373 u16 analog_stat0, tx_alarm_status;
1374 int err = 0;
1376 #if 1
1377 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1378 MII_STAT1000);
1379 if (err < 0)
1380 return err;
1381 pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
1382 np->port, err);
1384 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
1385 if (err < 0)
1386 return err;
1387 pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
1388 np->port, err);
1390 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1391 MII_NWAYTEST);
1392 if (err < 0)
1393 return err;
1394 pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
1395 np->port, err);
1396 #endif
1398 /* XXX dig this out it might not be so useful XXX */
1399 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1400 BCM8704_USER_ANALOG_STATUS0);
1401 if (err < 0)
1402 return err;
1403 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1404 BCM8704_USER_ANALOG_STATUS0);
1405 if (err < 0)
1406 return err;
1407 analog_stat0 = err;
1409 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1410 BCM8704_USER_TX_ALARM_STATUS);
1411 if (err < 0)
1412 return err;
1413 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1414 BCM8704_USER_TX_ALARM_STATUS);
1415 if (err < 0)
1416 return err;
1417 tx_alarm_status = err;
1419 if (analog_stat0 != 0x03fc) {
1420 if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
1421 pr_info(PFX "Port %u cable not connected "
1422 "or bad cable.\n", np->port);
1423 } else if (analog_stat0 == 0x639c) {
1424 pr_info(PFX "Port %u optical module is bad "
1425 "or missing.\n", np->port);
1429 return 0;
1432 static int xcvr_10g_set_lb_bcm870x(struct niu *np)
1434 struct niu_link_config *lp = &np->link_config;
1435 int err;
1437 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1438 MII_BMCR);
1439 if (err < 0)
1440 return err;
1442 err &= ~BMCR_LOOPBACK;
1444 if (lp->loopback_mode == LOOPBACK_MAC)
1445 err |= BMCR_LOOPBACK;
1447 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1448 MII_BMCR, err);
1449 if (err)
1450 return err;
1452 return 0;
1455 static int xcvr_init_10g_bcm8706(struct niu *np)
1457 int err = 0;
1458 u64 val;
1460 if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
1461 (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
1462 return err;
1464 val = nr64_mac(XMAC_CONFIG);
1465 val &= ~XMAC_CONFIG_LED_POLARITY;
1466 val |= XMAC_CONFIG_FORCE_LED_ON;
1467 nw64_mac(XMAC_CONFIG, val);
1469 val = nr64(MIF_CONFIG);
1470 val |= MIF_CONFIG_INDIRECT_MODE;
1471 nw64(MIF_CONFIG, val);
1473 err = bcm8704_reset(np);
1474 if (err)
1475 return err;
1477 err = xcvr_10g_set_lb_bcm870x(np);
1478 if (err)
1479 return err;
1481 err = bcm8706_init_user_dev3(np);
1482 if (err)
1483 return err;
1485 err = xcvr_diag_bcm870x(np);
1486 if (err)
1487 return err;
1489 return 0;
1492 static int xcvr_init_10g_bcm8704(struct niu *np)
1494 int err;
1496 err = bcm8704_reset(np);
1497 if (err)
1498 return err;
1500 err = bcm8704_init_user_dev3(np);
1501 if (err)
1502 return err;
1504 err = xcvr_10g_set_lb_bcm870x(np);
1505 if (err)
1506 return err;
1508 err = xcvr_diag_bcm870x(np);
1509 if (err)
1510 return err;
1512 return 0;
1515 static int xcvr_init_10g(struct niu *np)
1517 int phy_id, err;
1518 u64 val;
1520 val = nr64_mac(XMAC_CONFIG);
1521 val &= ~XMAC_CONFIG_LED_POLARITY;
1522 val |= XMAC_CONFIG_FORCE_LED_ON;
1523 nw64_mac(XMAC_CONFIG, val);
1525 /* XXX shared resource, lock parent XXX */
1526 val = nr64(MIF_CONFIG);
1527 val |= MIF_CONFIG_INDIRECT_MODE;
1528 nw64(MIF_CONFIG, val);
1530 phy_id = phy_decode(np->parent->port_phy, np->port);
1531 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1533 /* handle different phy types */
1534 switch (phy_id & NIU_PHY_ID_MASK) {
1535 case NIU_PHY_ID_MRVL88X2011:
1536 err = xcvr_init_10g_mrvl88x2011(np);
1537 break;
1539 default: /* bcom 8704 */
1540 err = xcvr_init_10g_bcm8704(np);
1541 break;
1544 return 0;
1547 static int mii_reset(struct niu *np)
1549 int limit, err;
1551 err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
1552 if (err)
1553 return err;
1555 limit = 1000;
1556 while (--limit >= 0) {
1557 udelay(500);
1558 err = mii_read(np, np->phy_addr, MII_BMCR);
1559 if (err < 0)
1560 return err;
1561 if (!(err & BMCR_RESET))
1562 break;
1564 if (limit < 0) {
1565 dev_err(np->device, PFX "Port %u MII would not reset, "
1566 "bmcr[%04x]\n", np->port, err);
1567 return -ENODEV;
1570 return 0;
1573 static int xcvr_init_1g_rgmii(struct niu *np)
1575 int err;
1576 u64 val;
1577 u16 bmcr, bmsr, estat;
1579 val = nr64(MIF_CONFIG);
1580 val &= ~MIF_CONFIG_INDIRECT_MODE;
1581 nw64(MIF_CONFIG, val);
1583 err = mii_reset(np);
1584 if (err)
1585 return err;
1587 err = mii_read(np, np->phy_addr, MII_BMSR);
1588 if (err < 0)
1589 return err;
1590 bmsr = err;
1592 estat = 0;
1593 if (bmsr & BMSR_ESTATEN) {
1594 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1595 if (err < 0)
1596 return err;
1597 estat = err;
1600 bmcr = 0;
1601 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1602 if (err)
1603 return err;
1605 if (bmsr & BMSR_ESTATEN) {
1606 u16 ctrl1000 = 0;
1608 if (estat & ESTATUS_1000_TFULL)
1609 ctrl1000 |= ADVERTISE_1000FULL;
1610 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1611 if (err)
1612 return err;
1615 bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
1617 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1618 if (err)
1619 return err;
1621 err = mii_read(np, np->phy_addr, MII_BMCR);
1622 if (err < 0)
1623 return err;
1624 bmcr = mii_read(np, np->phy_addr, MII_BMCR);
1626 err = mii_read(np, np->phy_addr, MII_BMSR);
1627 if (err < 0)
1628 return err;
1630 return 0;
1633 static int mii_init_common(struct niu *np)
1635 struct niu_link_config *lp = &np->link_config;
1636 u16 bmcr, bmsr, adv, estat;
1637 int err;
1639 err = mii_reset(np);
1640 if (err)
1641 return err;
1643 err = mii_read(np, np->phy_addr, MII_BMSR);
1644 if (err < 0)
1645 return err;
1646 bmsr = err;
1648 estat = 0;
1649 if (bmsr & BMSR_ESTATEN) {
1650 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1651 if (err < 0)
1652 return err;
1653 estat = err;
1656 bmcr = 0;
1657 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1658 if (err)
1659 return err;
1661 if (lp->loopback_mode == LOOPBACK_MAC) {
1662 bmcr |= BMCR_LOOPBACK;
1663 if (lp->active_speed == SPEED_1000)
1664 bmcr |= BMCR_SPEED1000;
1665 if (lp->active_duplex == DUPLEX_FULL)
1666 bmcr |= BMCR_FULLDPLX;
1669 if (lp->loopback_mode == LOOPBACK_PHY) {
1670 u16 aux;
1672 aux = (BCM5464R_AUX_CTL_EXT_LB |
1673 BCM5464R_AUX_CTL_WRITE_1);
1674 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
1675 if (err)
1676 return err;
1679 /* XXX configurable XXX */
1680 /* XXX for now don't advertise half-duplex or asym pause... XXX */
1681 adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1682 if (bmsr & BMSR_10FULL)
1683 adv |= ADVERTISE_10FULL;
1684 if (bmsr & BMSR_100FULL)
1685 adv |= ADVERTISE_100FULL;
1686 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
1687 if (err)
1688 return err;
1690 if (bmsr & BMSR_ESTATEN) {
1691 u16 ctrl1000 = 0;
1693 if (estat & ESTATUS_1000_TFULL)
1694 ctrl1000 |= ADVERTISE_1000FULL;
1695 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1696 if (err)
1697 return err;
1699 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1701 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1702 if (err)
1703 return err;
1705 err = mii_read(np, np->phy_addr, MII_BMCR);
1706 if (err < 0)
1707 return err;
1708 err = mii_read(np, np->phy_addr, MII_BMSR);
1709 if (err < 0)
1710 return err;
1711 #if 0
1712 pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1713 np->port, bmcr, bmsr);
1714 #endif
1716 return 0;
1719 static int xcvr_init_1g(struct niu *np)
1721 u64 val;
1723 /* XXX shared resource, lock parent XXX */
1724 val = nr64(MIF_CONFIG);
1725 val &= ~MIF_CONFIG_INDIRECT_MODE;
1726 nw64(MIF_CONFIG, val);
1728 return mii_init_common(np);
1731 static int niu_xcvr_init(struct niu *np)
1733 const struct niu_phy_ops *ops = np->phy_ops;
1734 int err;
1736 err = 0;
1737 if (ops->xcvr_init)
1738 err = ops->xcvr_init(np);
1740 return err;
1743 static int niu_serdes_init(struct niu *np)
1745 const struct niu_phy_ops *ops = np->phy_ops;
1746 int err;
1748 err = 0;
1749 if (ops->serdes_init)
1750 err = ops->serdes_init(np);
1752 return err;
1755 static void niu_init_xif(struct niu *);
1756 static void niu_handle_led(struct niu *, int status);
1758 static int niu_link_status_common(struct niu *np, int link_up)
1760 struct niu_link_config *lp = &np->link_config;
1761 struct net_device *dev = np->dev;
1762 unsigned long flags;
1764 if (!netif_carrier_ok(dev) && link_up) {
1765 niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
1766 dev->name,
1767 (lp->active_speed == SPEED_10000 ?
1768 "10Gb/sec" :
1769 (lp->active_speed == SPEED_1000 ?
1770 "1Gb/sec" :
1771 (lp->active_speed == SPEED_100 ?
1772 "100Mbit/sec" : "10Mbit/sec"))),
1773 (lp->active_duplex == DUPLEX_FULL ?
1774 "full" : "half"));
1776 spin_lock_irqsave(&np->lock, flags);
1777 niu_init_xif(np);
1778 niu_handle_led(np, 1);
1779 spin_unlock_irqrestore(&np->lock, flags);
1781 netif_carrier_on(dev);
1782 } else if (netif_carrier_ok(dev) && !link_up) {
1783 niuwarn(LINK, "%s: Link is down\n", dev->name);
1784 spin_lock_irqsave(&np->lock, flags);
1785 niu_handle_led(np, 0);
1786 spin_unlock_irqrestore(&np->lock, flags);
1787 netif_carrier_off(dev);
1790 return 0;
1793 static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
1795 int err, link_up, pma_status, pcs_status;
1797 link_up = 0;
1799 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1800 MRVL88X2011_10G_PMD_STATUS_2);
1801 if (err < 0)
1802 goto out;
1804 /* Check PMA/PMD Register: 1.0001.2 == 1 */
1805 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1806 MRVL88X2011_PMA_PMD_STATUS_1);
1807 if (err < 0)
1808 goto out;
1810 pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
1812 /* Check PMC Register : 3.0001.2 == 1: read twice */
1813 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1814 MRVL88X2011_PMA_PMD_STATUS_1);
1815 if (err < 0)
1816 goto out;
1818 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1819 MRVL88X2011_PMA_PMD_STATUS_1);
1820 if (err < 0)
1821 goto out;
1823 pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
1825 /* Check XGXS Register : 4.0018.[0-3,12] */
1826 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
1827 MRVL88X2011_10G_XGXS_LANE_STAT);
1828 if (err < 0)
1829 goto out;
1831 if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
1832 PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
1833 PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
1834 0x800))
1835 link_up = (pma_status && pcs_status) ? 1 : 0;
1837 np->link_config.active_speed = SPEED_10000;
1838 np->link_config.active_duplex = DUPLEX_FULL;
1839 err = 0;
1840 out:
1841 mrvl88x2011_act_led(np, (link_up ?
1842 MRVL88X2011_LED_CTL_PCS_ACT :
1843 MRVL88X2011_LED_CTL_OFF));
1845 *link_up_p = link_up;
1846 return err;
1849 static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
1851 int err, link_up;
1852 link_up = 0;
1854 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1855 BCM8704_PMD_RCV_SIGDET);
1856 if (err < 0)
1857 goto out;
1858 if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
1859 err = 0;
1860 goto out;
1863 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1864 BCM8704_PCS_10G_R_STATUS);
1865 if (err < 0)
1866 goto out;
1868 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
1869 err = 0;
1870 goto out;
1873 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1874 BCM8704_PHYXS_XGXS_LANE_STAT);
1875 if (err < 0)
1876 goto out;
1877 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
1878 PHYXS_XGXS_LANE_STAT_MAGIC |
1879 PHYXS_XGXS_LANE_STAT_PATTEST |
1880 PHYXS_XGXS_LANE_STAT_LANE3 |
1881 PHYXS_XGXS_LANE_STAT_LANE2 |
1882 PHYXS_XGXS_LANE_STAT_LANE1 |
1883 PHYXS_XGXS_LANE_STAT_LANE0)) {
1884 err = 0;
1885 np->link_config.active_speed = SPEED_INVALID;
1886 np->link_config.active_duplex = DUPLEX_INVALID;
1887 goto out;
1890 link_up = 1;
1891 np->link_config.active_speed = SPEED_10000;
1892 np->link_config.active_duplex = DUPLEX_FULL;
1893 err = 0;
1895 out:
1896 *link_up_p = link_up;
1897 if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
1898 err = 0;
1899 return err;
1902 static int link_status_10g_bcom(struct niu *np, int *link_up_p)
1904 int err, link_up;
1906 link_up = 0;
1908 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1909 BCM8704_PMD_RCV_SIGDET);
1910 if (err < 0)
1911 goto out;
1912 if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
1913 err = 0;
1914 goto out;
1917 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1918 BCM8704_PCS_10G_R_STATUS);
1919 if (err < 0)
1920 goto out;
1921 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
1922 err = 0;
1923 goto out;
1926 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1927 BCM8704_PHYXS_XGXS_LANE_STAT);
1928 if (err < 0)
1929 goto out;
1931 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
1932 PHYXS_XGXS_LANE_STAT_MAGIC |
1933 PHYXS_XGXS_LANE_STAT_LANE3 |
1934 PHYXS_XGXS_LANE_STAT_LANE2 |
1935 PHYXS_XGXS_LANE_STAT_LANE1 |
1936 PHYXS_XGXS_LANE_STAT_LANE0)) {
1937 err = 0;
1938 goto out;
1941 link_up = 1;
1942 np->link_config.active_speed = SPEED_10000;
1943 np->link_config.active_duplex = DUPLEX_FULL;
1944 err = 0;
1946 out:
1947 *link_up_p = link_up;
1948 return err;
1951 static int link_status_10g(struct niu *np, int *link_up_p)
1953 unsigned long flags;
1954 int err = -EINVAL;
1956 spin_lock_irqsave(&np->lock, flags);
1958 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
1959 int phy_id;
1961 phy_id = phy_decode(np->parent->port_phy, np->port);
1962 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1964 /* handle different phy types */
1965 switch (phy_id & NIU_PHY_ID_MASK) {
1966 case NIU_PHY_ID_MRVL88X2011:
1967 err = link_status_10g_mrvl(np, link_up_p);
1968 break;
1970 default: /* bcom 8704 */
1971 err = link_status_10g_bcom(np, link_up_p);
1972 break;
1976 spin_unlock_irqrestore(&np->lock, flags);
1978 return err;
1981 static int niu_10g_phy_present(struct niu *np)
1983 u64 sig, mask, val;
1985 sig = nr64(ESR_INT_SIGNALS);
1986 switch (np->port) {
1987 case 0:
1988 mask = ESR_INT_SIGNALS_P0_BITS;
1989 val = (ESR_INT_SRDY0_P0 |
1990 ESR_INT_DET0_P0 |
1991 ESR_INT_XSRDY_P0 |
1992 ESR_INT_XDP_P0_CH3 |
1993 ESR_INT_XDP_P0_CH2 |
1994 ESR_INT_XDP_P0_CH1 |
1995 ESR_INT_XDP_P0_CH0);
1996 break;
1998 case 1:
1999 mask = ESR_INT_SIGNALS_P1_BITS;
2000 val = (ESR_INT_SRDY0_P1 |
2001 ESR_INT_DET0_P1 |
2002 ESR_INT_XSRDY_P1 |
2003 ESR_INT_XDP_P1_CH3 |
2004 ESR_INT_XDP_P1_CH2 |
2005 ESR_INT_XDP_P1_CH1 |
2006 ESR_INT_XDP_P1_CH0);
2007 break;
2009 default:
2010 return 0;
2013 if ((sig & mask) != val)
2014 return 0;
2015 return 1;
2018 static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
2020 unsigned long flags;
2021 int err = 0;
2022 int phy_present;
2023 int phy_present_prev;
2025 spin_lock_irqsave(&np->lock, flags);
2027 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2028 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
2029 1 : 0;
2030 phy_present = niu_10g_phy_present(np);
2031 if (phy_present != phy_present_prev) {
2032 /* state change */
2033 if (phy_present) {
2034 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2035 if (np->phy_ops->xcvr_init)
2036 err = np->phy_ops->xcvr_init(np);
2037 if (err) {
2038 /* debounce */
2039 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2041 } else {
2042 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2043 *link_up_p = 0;
2044 niuwarn(LINK, "%s: Hotplug PHY Removed\n",
2045 np->dev->name);
2048 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
2049 err = link_status_10g_bcm8706(np, link_up_p);
2052 spin_unlock_irqrestore(&np->lock, flags);
2054 return err;
2057 static int link_status_1g(struct niu *np, int *link_up_p)
2059 struct niu_link_config *lp = &np->link_config;
2060 u16 current_speed, bmsr;
2061 unsigned long flags;
2062 u8 current_duplex;
2063 int err, link_up;
2065 link_up = 0;
2066 current_speed = SPEED_INVALID;
2067 current_duplex = DUPLEX_INVALID;
2069 spin_lock_irqsave(&np->lock, flags);
2071 err = -EINVAL;
2072 if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
2073 goto out;
2075 err = mii_read(np, np->phy_addr, MII_BMSR);
2076 if (err < 0)
2077 goto out;
2079 bmsr = err;
2080 if (bmsr & BMSR_LSTATUS) {
2081 u16 adv, lpa, common, estat;
2083 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
2084 if (err < 0)
2085 goto out;
2086 adv = err;
2088 err = mii_read(np, np->phy_addr, MII_LPA);
2089 if (err < 0)
2090 goto out;
2091 lpa = err;
2093 common = adv & lpa;
2095 err = mii_read(np, np->phy_addr, MII_ESTATUS);
2096 if (err < 0)
2097 goto out;
2098 estat = err;
2100 link_up = 1;
2101 if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
2102 current_speed = SPEED_1000;
2103 if (estat & ESTATUS_1000_TFULL)
2104 current_duplex = DUPLEX_FULL;
2105 else
2106 current_duplex = DUPLEX_HALF;
2107 } else {
2108 if (common & ADVERTISE_100BASE4) {
2109 current_speed = SPEED_100;
2110 current_duplex = DUPLEX_HALF;
2111 } else if (common & ADVERTISE_100FULL) {
2112 current_speed = SPEED_100;
2113 current_duplex = DUPLEX_FULL;
2114 } else if (common & ADVERTISE_100HALF) {
2115 current_speed = SPEED_100;
2116 current_duplex = DUPLEX_HALF;
2117 } else if (common & ADVERTISE_10FULL) {
2118 current_speed = SPEED_10;
2119 current_duplex = DUPLEX_FULL;
2120 } else if (common & ADVERTISE_10HALF) {
2121 current_speed = SPEED_10;
2122 current_duplex = DUPLEX_HALF;
2123 } else
2124 link_up = 0;
2127 lp->active_speed = current_speed;
2128 lp->active_duplex = current_duplex;
2129 err = 0;
2131 out:
2132 spin_unlock_irqrestore(&np->lock, flags);
2134 *link_up_p = link_up;
2135 return err;
2138 static int niu_link_status(struct niu *np, int *link_up_p)
2140 const struct niu_phy_ops *ops = np->phy_ops;
2141 int err;
2143 err = 0;
2144 if (ops->link_status)
2145 err = ops->link_status(np, link_up_p);
2147 return err;
2150 static void niu_timer(unsigned long __opaque)
2152 struct niu *np = (struct niu *) __opaque;
2153 unsigned long off;
2154 int err, link_up;
2156 err = niu_link_status(np, &link_up);
2157 if (!err)
2158 niu_link_status_common(np, link_up);
2160 if (netif_carrier_ok(np->dev))
2161 off = 5 * HZ;
2162 else
2163 off = 1 * HZ;
2164 np->timer.expires = jiffies + off;
2166 add_timer(&np->timer);
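/* PHY operations tables: each platform/transceiver combination supplies
 * its own serdes_init/xcvr_init/link_status hooks, and
 * niu_determine_phy_disposition() selects the matching template and
 * PHY address base.
 */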
2169 static const struct niu_phy_ops phy_ops_10g_serdes = {
2170 .serdes_init = serdes_init_10g_serdes,
2171 .link_status = link_status_10g_serdes,
2174 static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2175 .serdes_init = serdes_init_niu_10g_serdes,
2176 .link_status = link_status_10g_serdes,
2179 static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2180 .serdes_init = serdes_init_niu_1g_serdes,
2181 .link_status = link_status_1g_serdes,
2184 static const struct niu_phy_ops phy_ops_1g_rgmii = {
2185 .xcvr_init = xcvr_init_1g_rgmii,
2186 .link_status = link_status_1g_rgmii,
2189 static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
2190 .serdes_init = serdes_init_niu_10g_fiber,
2191 .xcvr_init = xcvr_init_10g,
2192 .link_status = link_status_10g,
2195 static const struct niu_phy_ops phy_ops_10g_fiber = {
2196 .serdes_init = serdes_init_10g,
2197 .xcvr_init = xcvr_init_10g,
2198 .link_status = link_status_10g,
2201 static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
2202 .serdes_init = serdes_init_10g,
2203 .xcvr_init = xcvr_init_10g_bcm8706,
2204 .link_status = link_status_10g_hotplug,
2207 static const struct niu_phy_ops phy_ops_10g_copper = {
2208 .serdes_init = serdes_init_10g,
2209 .link_status = link_status_10g, /* XXX */
2212 static const struct niu_phy_ops phy_ops_1g_fiber = {
2213 .serdes_init = serdes_init_1g,
2214 .xcvr_init = xcvr_init_1g,
2215 .link_status = link_status_1g,
2218 static const struct niu_phy_ops phy_ops_1g_copper = {
2219 .xcvr_init = xcvr_init_1g,
2220 .link_status = link_status_1g,
2223 struct niu_phy_template {
2224 const struct niu_phy_ops *ops;
2225 u32 phy_addr_base;
2228 static const struct niu_phy_template phy_template_niu_10g_fiber = {
2229 .ops = &phy_ops_10g_fiber_niu,
2230 .phy_addr_base = 16,
2233 static const struct niu_phy_template phy_template_niu_10g_serdes = {
2234 .ops = &phy_ops_10g_serdes_niu,
2235 .phy_addr_base = 0,
2238 static const struct niu_phy_template phy_template_niu_1g_serdes = {
2239 .ops = &phy_ops_1g_serdes_niu,
2240 .phy_addr_base = 0,
2243 static const struct niu_phy_template phy_template_10g_fiber = {
2244 .ops = &phy_ops_10g_fiber,
2245 .phy_addr_base = 8,
2248 static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
2249 .ops = &phy_ops_10g_fiber_hotplug,
2250 .phy_addr_base = 8,
2253 static const struct niu_phy_template phy_template_10g_copper = {
2254 .ops = &phy_ops_10g_copper,
2255 .phy_addr_base = 10,
2258 static const struct niu_phy_template phy_template_1g_fiber = {
2259 .ops = &phy_ops_1g_fiber,
2260 .phy_addr_base = 0,
2263 static const struct niu_phy_template phy_template_1g_copper = {
2264 .ops = &phy_ops_1g_copper,
2265 .phy_addr_base = 0,
2268 static const struct niu_phy_template phy_template_1g_rgmii = {
2269 .ops = &phy_ops_1g_rgmii,
2270 .phy_addr_base = 0,
2273 static const struct niu_phy_template phy_template_10g_serdes = {
2274 .ops = &phy_ops_10g_serdes,
2275 .phy_addr_base = 0,
2278 static int niu_atca_port_num[4] = {
2279 0, 0, 11, 10
2282 static int serdes_init_10g_serdes(struct niu *np)
2284 struct niu_link_config *lp = &np->link_config;
2285 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
2286 u64 ctrl_val, test_cfg_val, sig, mask, val;
2287 int err;
2288 u64 reset_val;
2290 switch (np->port) {
2291 case 0:
2292 reset_val = ENET_SERDES_RESET_0;
2293 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
2294 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
2295 pll_cfg = ENET_SERDES_0_PLL_CFG;
2296 break;
2297 case 1:
2298 reset_val = ENET_SERDES_RESET_1;
2299 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
2300 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
2301 pll_cfg = ENET_SERDES_1_PLL_CFG;
2302 break;
2304 default:
2305 return -EINVAL;
2307 ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
2308 ENET_SERDES_CTRL_SDET_1 |
2309 ENET_SERDES_CTRL_SDET_2 |
2310 ENET_SERDES_CTRL_SDET_3 |
2311 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
2312 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
2313 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
2314 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
2315 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
2316 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
2317 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
2318 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
2319 test_cfg_val = 0;
2321 if (lp->loopback_mode == LOOPBACK_PHY) {
2322 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
2323 ENET_SERDES_TEST_MD_0_SHIFT) |
2324 (ENET_TEST_MD_PAD_LOOPBACK <<
2325 ENET_SERDES_TEST_MD_1_SHIFT) |
2326 (ENET_TEST_MD_PAD_LOOPBACK <<
2327 ENET_SERDES_TEST_MD_2_SHIFT) |
2328 (ENET_TEST_MD_PAD_LOOPBACK <<
2329 ENET_SERDES_TEST_MD_3_SHIFT));
2332 esr_reset(np);
2333 nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
2334 nw64(ctrl_reg, ctrl_val);
2335 nw64(test_cfg_reg, test_cfg_val);
2337 /* Initialize all 4 lanes of the SERDES. */
2338 for (i = 0; i < 4; i++) {
2339 u32 rxtx_ctrl, glue0;
2341 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2342 if (err)
2343 return err;
2344 err = esr_read_glue0(np, i, &glue0);
2345 if (err)
2346 return err;
2348 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
2349 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
2350 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
2352 glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
2353 ESR_GLUE_CTRL0_THCNT |
2354 ESR_GLUE_CTRL0_BLTIME);
2355 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
2356 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
2357 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
2358 (BLTIME_300_CYCLES <<
2359 ESR_GLUE_CTRL0_BLTIME_SHIFT));
2361 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2362 if (err)
2363 return err;
2364 err = esr_write_glue0(np, i, glue0);
2365 if (err)
2366 return err;
2370 sig = nr64(ESR_INT_SIGNALS);
2371 switch (np->port) {
2372 case 0:
2373 mask = ESR_INT_SIGNALS_P0_BITS;
2374 val = (ESR_INT_SRDY0_P0 |
2375 ESR_INT_DET0_P0 |
2376 ESR_INT_XSRDY_P0 |
2377 ESR_INT_XDP_P0_CH3 |
2378 ESR_INT_XDP_P0_CH2 |
2379 ESR_INT_XDP_P0_CH1 |
2380 ESR_INT_XDP_P0_CH0);
2381 break;
2383 case 1:
2384 mask = ESR_INT_SIGNALS_P1_BITS;
2385 val = (ESR_INT_SRDY0_P1 |
2386 ESR_INT_DET0_P1 |
2387 ESR_INT_XSRDY_P1 |
2388 ESR_INT_XDP_P1_CH3 |
2389 ESR_INT_XDP_P1_CH2 |
2390 ESR_INT_XDP_P1_CH1 |
2391 ESR_INT_XDP_P1_CH0);
2392 break;
2394 default:
2395 return -EINVAL;
2398 if ((sig & mask) != val) {
2399 int err;
2400 err = serdes_init_1g_serdes(np);
2401 if (!err) {
2402 np->flags &= ~NIU_FLAGS_10G;
2403 np->mac_xcvr = MAC_XCVR_PCS;
2404 } else {
2405 dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed\n",
2406 np->port);
2407 return -ENODEV;
2411 return 0;
2414 static int niu_determine_phy_disposition(struct niu *np)
2416 struct niu_parent *parent = np->parent;
2417 u8 plat_type = parent->plat_type;
2418 const struct niu_phy_template *tp;
2419 u32 phy_addr_off = 0;
2421 if (plat_type == PLAT_TYPE_NIU) {
2422 switch (np->flags &
2423 (NIU_FLAGS_10G |
2424 NIU_FLAGS_FIBER |
2425 NIU_FLAGS_XCVR_SERDES)) {
2426 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2427 /* 10G Serdes */
2428 tp = &phy_template_niu_10g_serdes;
2429 break;
2430 case NIU_FLAGS_XCVR_SERDES:
2431 /* 1G Serdes */
2432 tp = &phy_template_niu_1g_serdes;
2433 break;
2434 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2435 /* 10G Fiber */
2436 default:
2437 tp = &phy_template_niu_10g_fiber;
2438 phy_addr_off += np->port;
2439 break;
2441 } else {
2442 switch (np->flags &
2443 (NIU_FLAGS_10G |
2444 NIU_FLAGS_FIBER |
2445 NIU_FLAGS_XCVR_SERDES)) {
2446 case 0:
2447 /* 1G copper */
2448 tp = &phy_template_1g_copper;
2449 if (plat_type == PLAT_TYPE_VF_P0)
2450 phy_addr_off = 10;
2451 else if (plat_type == PLAT_TYPE_VF_P1)
2452 phy_addr_off = 26;
2454 phy_addr_off += (np->port ^ 0x3);
2455 break;
2457 case NIU_FLAGS_10G:
2458 /* 10G copper */
2459 tp = &phy_template_10g_copper;
2460 break;
2462 case NIU_FLAGS_FIBER:
2463 /* 1G fiber */
2464 tp = &phy_template_1g_fiber;
2465 break;
2467 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2468 /* 10G fiber */
2469 tp = &phy_template_10g_fiber;
2470 if (plat_type == PLAT_TYPE_VF_P0 ||
2471 plat_type == PLAT_TYPE_VF_P1)
2472 phy_addr_off = 8;
2473 phy_addr_off += np->port;
2474 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2475 tp = &phy_template_10g_fiber_hotplug;
2476 if (np->port == 0)
2477 phy_addr_off = 8;
2478 if (np->port == 1)
2479 phy_addr_off = 12;
2481 break;
2483 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2484 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2485 case NIU_FLAGS_XCVR_SERDES:
2486 switch(np->port) {
2487 case 0:
2488 case 1:
2489 tp = &phy_template_10g_serdes;
2490 break;
2491 case 2:
2492 case 3:
2493 tp = &phy_template_1g_rgmii;
2494 break;
2495 default:
2496 return -EINVAL;
2497 break;
2499 phy_addr_off = niu_atca_port_num[np->port];
2500 break;
2502 default:
2503 return -EINVAL;
2507 np->phy_ops = tp->ops;
2508 np->phy_addr = tp->phy_addr_base + phy_addr_off;
2510 return 0;
2513 static int niu_init_link(struct niu *np)
2515 struct niu_parent *parent = np->parent;
2516 int err, ignore;
2518 if (parent->plat_type == PLAT_TYPE_NIU) {
2519 err = niu_xcvr_init(np);
2520 if (err)
2521 return err;
2522 msleep(200);
2524 err = niu_serdes_init(np);
2525 if (err)
2526 return err;
2527 msleep(200);
2528 err = niu_xcvr_init(np);
2529 if (!err)
2530 niu_link_status(np, &ignore);
2531 return 0;
2534 static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2536 u16 reg0 = addr[4] << 8 | addr[5];
2537 u16 reg1 = addr[2] << 8 | addr[3];
2538 u16 reg2 = addr[0] << 8 | addr[1];
2540 if (np->flags & NIU_FLAGS_XMAC) {
2541 nw64_mac(XMAC_ADDR0, reg0);
2542 nw64_mac(XMAC_ADDR1, reg1);
2543 nw64_mac(XMAC_ADDR2, reg2);
2544 } else {
2545 nw64_mac(BMAC_ADDR0, reg0);
2546 nw64_mac(BMAC_ADDR1, reg1);
2547 nw64_mac(BMAC_ADDR2, reg2);
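/*
 * For illustration only: the MAC address bytes are packed into three
 * 16-bit register values with the most significant bytes in reg2.
 * Assuming a hypothetical address 00:14:4f:6c:a2:01, the writes above
 * would be reg2 = 0x0014, reg1 = 0x4f6c and reg0 = 0xa201.
 */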
2551 static int niu_num_alt_addr(struct niu *np)
2553 if (np->flags & NIU_FLAGS_XMAC)
2554 return XMAC_NUM_ALT_ADDR;
2555 else
2556 return BMAC_NUM_ALT_ADDR;
2559 static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2561 u16 reg0 = addr[4] << 8 | addr[5];
2562 u16 reg1 = addr[2] << 8 | addr[3];
2563 u16 reg2 = addr[0] << 8 | addr[1];
2565 if (index >= niu_num_alt_addr(np))
2566 return -EINVAL;
2568 if (np->flags & NIU_FLAGS_XMAC) {
2569 nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2570 nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2571 nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2572 } else {
2573 nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2574 nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2575 nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2578 return 0;
2581 static int niu_enable_alt_mac(struct niu *np, int index, int on)
2583 unsigned long reg;
2584 u64 val, mask;
2586 if (index >= niu_num_alt_addr(np))
2587 return -EINVAL;
2589 if (np->flags & NIU_FLAGS_XMAC) {
2590 reg = XMAC_ADDR_CMPEN;
2591 mask = 1 << index;
2592 } else {
2593 reg = BMAC_ADDR_CMPEN;
2594 mask = 1 << (index + 1);
2597 val = nr64_mac(reg);
2598 if (on)
2599 val |= mask;
2600 else
2601 val &= ~mask;
2602 nw64_mac(reg, val);
2604 return 0;
2607 static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2608 int num, int mac_pref)
2610 u64 val = nr64_mac(reg);
2611 val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2612 val |= num;
2613 if (mac_pref)
2614 val |= HOST_INFO_MPR;
2615 nw64_mac(reg, val);
2618 static int __set_rdc_table_num(struct niu *np,
2619 int xmac_index, int bmac_index,
2620 int rdc_table_num, int mac_pref)
2622 unsigned long reg;
2624 if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2625 return -EINVAL;
2626 if (np->flags & NIU_FLAGS_XMAC)
2627 reg = XMAC_HOST_INFO(xmac_index);
2628 else
2629 reg = BMAC_HOST_INFO(bmac_index);
2630 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2631 return 0;
2634 static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
2635 int mac_pref)
2637 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2640 static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
2641 int mac_pref)
2643 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2646 static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2647 int table_num, int mac_pref)
2649 if (idx >= niu_num_alt_addr(np))
2650 return -EINVAL;
2651 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2654 static u64 vlan_entry_set_parity(u64 reg_val)
2656 u64 port01_mask;
2657 u64 port23_mask;
2659 port01_mask = 0x00ff;
2660 port23_mask = 0xff00;
2662 if (hweight64(reg_val & port01_mask) & 1)
2663 reg_val |= ENET_VLAN_TBL_PARITY0;
2664 else
2665 reg_val &= ~ENET_VLAN_TBL_PARITY0;
2667 if (hweight64(reg_val & port23_mask) & 1)
2668 reg_val |= ENET_VLAN_TBL_PARITY1;
2669 else
2670 reg_val &= ~ENET_VLAN_TBL_PARITY1;
2672 return reg_val;
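/*
 * Illustrative note: per the masks above, the low byte of the entry
 * carries the port 0/1 fields and the high byte the port 2/3 fields.
 * If, say, the port 0/1 byte were 0x0b (three bits set, odd weight),
 * PARITY0 is set so that byte plus its parity bit have even parity;
 * an even-weight byte leaves the parity bit clear.
 */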
2675 static void vlan_tbl_write(struct niu *np, unsigned long index,
2676 int port, int vpr, int rdc_table)
2678 u64 reg_val = nr64(ENET_VLAN_TBL(index));
2680 reg_val &= ~((ENET_VLAN_TBL_VPR |
2681 ENET_VLAN_TBL_VLANRDCTBLN) <<
2682 ENET_VLAN_TBL_SHIFT(port));
2683 if (vpr)
2684 reg_val |= (ENET_VLAN_TBL_VPR <<
2685 ENET_VLAN_TBL_SHIFT(port));
2686 reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2688 reg_val = vlan_entry_set_parity(reg_val);
2690 nw64(ENET_VLAN_TBL(index), reg_val);
2693 static void vlan_tbl_clear(struct niu *np)
2695 int i;
2697 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2698 nw64(ENET_VLAN_TBL(i), 0);
2701 static int tcam_wait_bit(struct niu *np, u64 bit)
2703 int limit = 1000;
2705 while (--limit > 0) {
2706 if (nr64(TCAM_CTL) & bit)
2707 break;
2708 udelay(1);
2710 if (limit <= 0)
2711 return -ENODEV;
2713 return 0;
2716 static int tcam_flush(struct niu *np, int index)
2718 nw64(TCAM_KEY_0, 0x00);
2719 nw64(TCAM_KEY_MASK_0, 0xff);
2720 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2722 return tcam_wait_bit(np, TCAM_CTL_STAT);
2725 #if 0
2726 static int tcam_read(struct niu *np, int index,
2727 u64 *key, u64 *mask)
2729 int err;
2731 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
2732 err = tcam_wait_bit(np, TCAM_CTL_STAT);
2733 if (!err) {
2734 key[0] = nr64(TCAM_KEY_0);
2735 key[1] = nr64(TCAM_KEY_1);
2736 key[2] = nr64(TCAM_KEY_2);
2737 key[3] = nr64(TCAM_KEY_3);
2738 mask[0] = nr64(TCAM_KEY_MASK_0);
2739 mask[1] = nr64(TCAM_KEY_MASK_1);
2740 mask[2] = nr64(TCAM_KEY_MASK_2);
2741 mask[3] = nr64(TCAM_KEY_MASK_3);
2743 return err;
2745 #endif
2747 static int tcam_write(struct niu *np, int index,
2748 u64 *key, u64 *mask)
2750 nw64(TCAM_KEY_0, key[0]);
2751 nw64(TCAM_KEY_1, key[1]);
2752 nw64(TCAM_KEY_2, key[2]);
2753 nw64(TCAM_KEY_3, key[3]);
2754 nw64(TCAM_KEY_MASK_0, mask[0]);
2755 nw64(TCAM_KEY_MASK_1, mask[1]);
2756 nw64(TCAM_KEY_MASK_2, mask[2]);
2757 nw64(TCAM_KEY_MASK_3, mask[3]);
2758 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2760 return tcam_wait_bit(np, TCAM_CTL_STAT);
2763 #if 0
2764 static int tcam_assoc_read(struct niu *np, int index, u64 *data)
2766 int err;
2768 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
2769 err = tcam_wait_bit(np, TCAM_CTL_STAT);
2770 if (!err)
2771 *data = nr64(TCAM_KEY_1);
2773 return err;
2775 #endif
2777 static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
2779 nw64(TCAM_KEY_1, assoc_data);
2780 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
2782 return tcam_wait_bit(np, TCAM_CTL_STAT);
2785 static void tcam_enable(struct niu *np, int on)
2787 u64 val = nr64(FFLP_CFG_1);
2789 if (on)
2790 val &= ~FFLP_CFG_1_TCAM_DIS;
2791 else
2792 val |= FFLP_CFG_1_TCAM_DIS;
2793 nw64(FFLP_CFG_1, val);
2796 static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
2798 u64 val = nr64(FFLP_CFG_1);
2800 val &= ~(FFLP_CFG_1_FFLPINITDONE |
2801 FFLP_CFG_1_CAMLAT |
2802 FFLP_CFG_1_CAMRATIO);
2803 val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
2804 val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
2805 nw64(FFLP_CFG_1, val);
2807 val = nr64(FFLP_CFG_1);
2808 val |= FFLP_CFG_1_FFLPINITDONE;
2809 nw64(FFLP_CFG_1, val);
2812 static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2813 int on)
2815 unsigned long reg;
2816 u64 val;
2818 if (class < CLASS_CODE_ETHERTYPE1 ||
2819 class > CLASS_CODE_ETHERTYPE2)
2820 return -EINVAL;
2822 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2823 val = nr64(reg);
2824 if (on)
2825 val |= L2_CLS_VLD;
2826 else
2827 val &= ~L2_CLS_VLD;
2828 nw64(reg, val);
2830 return 0;
2833 #if 0
2834 static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
2835 u64 ether_type)
2837 unsigned long reg;
2838 u64 val;
2840 if (class < CLASS_CODE_ETHERTYPE1 ||
2841 class > CLASS_CODE_ETHERTYPE2 ||
2842 (ether_type & ~(u64)0xffff) != 0)
2843 return -EINVAL;
2845 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2846 val = nr64(reg);
2847 val &= ~L2_CLS_ETYPE;
2848 val |= (ether_type << L2_CLS_ETYPE_SHIFT);
2849 nw64(reg, val);
2851 return 0;
2853 #endif
2855 static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2856 int on)
2858 unsigned long reg;
2859 u64 val;
2861 if (class < CLASS_CODE_USER_PROG1 ||
2862 class > CLASS_CODE_USER_PROG4)
2863 return -EINVAL;
2865 reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2866 val = nr64(reg);
2867 if (on)
2868 val |= L3_CLS_VALID;
2869 else
2870 val &= ~L3_CLS_VALID;
2871 nw64(reg, val);
2873 return 0;
2876 #if 0
2877 static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
2878 int ipv6, u64 protocol_id,
2879 u64 tos_mask, u64 tos_val)
2881 unsigned long reg;
2882 u64 val;
2884 if (class < CLASS_CODE_USER_PROG1 ||
2885 class > CLASS_CODE_USER_PROG4 ||
2886 (protocol_id & ~(u64)0xff) != 0 ||
2887 (tos_mask & ~(u64)0xff) != 0 ||
2888 (tos_val & ~(u64)0xff) != 0)
2889 return -EINVAL;
2891 reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2892 val = nr64(reg);
2893 val &= ~(L3_CLS_IPVER | L3_CLS_PID |
2894 L3_CLS_TOSMASK | L3_CLS_TOS);
2895 if (ipv6)
2896 val |= L3_CLS_IPVER;
2897 val |= (protocol_id << L3_CLS_PID_SHIFT);
2898 val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
2899 val |= (tos_val << L3_CLS_TOS_SHIFT);
2900 nw64(reg, val);
2902 return 0;
2904 #endif
2906 static int tcam_early_init(struct niu *np)
2908 unsigned long i;
2909 int err;
2911 tcam_enable(np, 0);
2912 tcam_set_lat_and_ratio(np,
2913 DEFAULT_TCAM_LATENCY,
2914 DEFAULT_TCAM_ACCESS_RATIO);
2915 for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
2916 err = tcam_user_eth_class_enable(np, i, 0);
2917 if (err)
2918 return err;
2920 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
2921 err = tcam_user_ip_class_enable(np, i, 0);
2922 if (err)
2923 return err;
2926 return 0;
2929 static int tcam_flush_all(struct niu *np)
2931 unsigned long i;
2933 for (i = 0; i < np->parent->tcam_num_entries; i++) {
2934 int err = tcam_flush(np, i);
2935 if (err)
2936 return err;
2938 return 0;
2941 static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
2943 return ((u64)index | (num_entries == 1 ?
2944 HASH_TBL_ADDR_AUTOINC : 0));
2947 #if 0
2948 static int hash_read(struct niu *np, unsigned long partition,
2949 unsigned long index, unsigned long num_entries,
2950 u64 *data)
2952 u64 val = hash_addr_regval(index, num_entries);
2953 unsigned long i;
2955 if (partition >= FCRAM_NUM_PARTITIONS ||
2956 index + num_entries > FCRAM_SIZE)
2957 return -EINVAL;
2959 nw64(HASH_TBL_ADDR(partition), val);
2960 for (i = 0; i < num_entries; i++)
2961 data[i] = nr64(HASH_TBL_DATA(partition));
2963 return 0;
2965 #endif
2967 static int hash_write(struct niu *np, unsigned long partition,
2968 unsigned long index, unsigned long num_entries,
2969 u64 *data)
2971 u64 val = hash_addr_regval(index, num_entries);
2972 unsigned long i;
2974 if (partition >= FCRAM_NUM_PARTITIONS ||
2975 index + (num_entries * 8) > FCRAM_SIZE)
2976 return -EINVAL;
2978 nw64(HASH_TBL_ADDR(partition), val);
2979 for (i = 0; i < num_entries; i++)
2980 nw64(HASH_TBL_DATA(partition), data[i]);
2982 return 0;
2985 static void fflp_reset(struct niu *np)
2987 u64 val;
2989 nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
2990 udelay(10);
2991 nw64(FFLP_CFG_1, 0);
2993 val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
2994 nw64(FFLP_CFG_1, val);
2997 static void fflp_set_timings(struct niu *np)
2999 u64 val = nr64(FFLP_CFG_1);
3001 val &= ~FFLP_CFG_1_FFLPINITDONE;
3002 val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
3003 nw64(FFLP_CFG_1, val);
3005 val = nr64(FFLP_CFG_1);
3006 val |= FFLP_CFG_1_FFLPINITDONE;
3007 nw64(FFLP_CFG_1, val);
3009 val = nr64(FCRAM_REF_TMR);
3010 val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
3011 val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
3012 val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
3013 nw64(FCRAM_REF_TMR, val);
3016 static int fflp_set_partition(struct niu *np, u64 partition,
3017 u64 mask, u64 base, int enable)
3019 unsigned long reg;
3020 u64 val;
3022 if (partition >= FCRAM_NUM_PARTITIONS ||
3023 (mask & ~(u64)0x1f) != 0 ||
3024 (base & ~(u64)0x1f) != 0)
3025 return -EINVAL;
3027 reg = FLW_PRT_SEL(partition);
3029 val = nr64(reg);
3030 val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
3031 val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
3032 val |= (base << FLW_PRT_SEL_BASE_SHIFT);
3033 if (enable)
3034 val |= FLW_PRT_SEL_EXT;
3035 nw64(reg, val);
3037 return 0;
3040 static int fflp_disable_all_partitions(struct niu *np)
3042 unsigned long i;
3044 for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
3045 int err = fflp_set_partition(np, 0, 0, 0, 0);
3046 if (err)
3047 return err;
3049 return 0;
3052 static void fflp_llcsnap_enable(struct niu *np, int on)
3054 u64 val = nr64(FFLP_CFG_1);
3056 if (on)
3057 val |= FFLP_CFG_1_LLCSNAP;
3058 else
3059 val &= ~FFLP_CFG_1_LLCSNAP;
3060 nw64(FFLP_CFG_1, val);
3063 static void fflp_errors_enable(struct niu *np, int on)
3065 u64 val = nr64(FFLP_CFG_1);
3067 if (on)
3068 val &= ~FFLP_CFG_1_ERRORDIS;
3069 else
3070 val |= FFLP_CFG_1_ERRORDIS;
3071 nw64(FFLP_CFG_1, val);
3074 static int fflp_hash_clear(struct niu *np)
3076 struct fcram_hash_ipv4 ent;
3077 unsigned long i;
3079 /* IPV4 hash entry with valid bit clear, rest is don't care. */
3080 memset(&ent, 0, sizeof(ent));
3081 ent.header = HASH_HEADER_EXT;
3083 for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
3084 int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3085 if (err)
3086 return err;
3088 return 0;
3091 static int fflp_early_init(struct niu *np)
3093 struct niu_parent *parent;
3094 unsigned long flags;
3095 int err;
3097 niu_lock_parent(np, flags);
3099 parent = np->parent;
3100 err = 0;
3101 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
3102 niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
3103 np->port);
3104 if (np->parent->plat_type != PLAT_TYPE_NIU) {
3105 fflp_reset(np);
3106 fflp_set_timings(np);
3107 err = fflp_disable_all_partitions(np);
3108 if (err) {
3109 niudbg(PROBE, "fflp_disable_all_partitions "
3110 "failed, err=%d\n", err);
3111 goto out;
3115 err = tcam_early_init(np);
3116 if (err) {
3117 niudbg(PROBE, "tcam_early_init failed, err=%d\n",
3118 err);
3119 goto out;
3121 fflp_llcsnap_enable(np, 1);
3122 fflp_errors_enable(np, 0);
3123 nw64(H1POLY, 0);
3124 nw64(H2POLY, 0);
3126 err = tcam_flush_all(np);
3127 if (err) {
3128 niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
3129 err);
3130 goto out;
3132 if (np->parent->plat_type != PLAT_TYPE_NIU) {
3133 err = fflp_hash_clear(np);
3134 if (err) {
3135 niudbg(PROBE, "fflp_hash_clear failed, "
3136 "err=%d\n", err);
3137 goto out;
3141 vlan_tbl_clear(np);
3143 niudbg(PROBE, "fflp_early_init: Success\n");
3144 parent->flags |= PARENT_FLGS_CLS_HWINIT;
3146 out:
3147 niu_unlock_parent(np, flags);
3148 return err;
3151 static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
3153 if (class_code < CLASS_CODE_USER_PROG1 ||
3154 class_code > CLASS_CODE_SCTP_IPV6)
3155 return -EINVAL;
3157 nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3158 return 0;
3161 static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
3163 if (class_code < CLASS_CODE_USER_PROG1 ||
3164 class_code > CLASS_CODE_SCTP_IPV6)
3165 return -EINVAL;
3167 nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3168 return 0;
3171 static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
3172 u32 offset, u32 size)
3174 int i = skb_shinfo(skb)->nr_frags;
3175 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3177 frag->page = page;
3178 frag->page_offset = offset;
3179 frag->size = size;
3181 skb->len += size;
3182 skb->data_len += size;
3183 skb->truesize += size;
3185 skb_shinfo(skb)->nr_frags = i + 1;
3188 static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3190 a >>= PAGE_SHIFT;
3191 a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3193 return (a & (MAX_RBR_RING_SIZE - 1));
3196 static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
3197 struct page ***link)
3199 unsigned int h = niu_hash_rxaddr(rp, addr);
3200 struct page *p, **pp;
3202 addr &= PAGE_MASK;
3203 pp = &rp->rxhash[h];
3204 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
3205 if (p->index == addr) {
3206 *link = pp;
3207 break;
3211 return p;
3214 static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
3216 unsigned int h = niu_hash_rxaddr(rp, base);
3218 page->index = base;
3219 page->mapping = (struct address_space *) rp->rxhash[h];
3220 rp->rxhash[h] = page;
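/*
 * A sketch of the receive-page tracking used here: each page mapped for
 * RX is filed into rp->rxhash[] in the bucket chosen by niu_hash_rxaddr()
 * from its DMA base address; page->index stores that DMA base and
 * page->mapping is reused as the "next" pointer of the hash chain, which
 * is how niu_find_rxpage() walks a bucket and matches RCR completions
 * back to their backing page.
 */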
3223 static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3224 gfp_t mask, int start_index)
3226 struct page *page;
3227 u64 addr;
3228 int i;
3230 page = alloc_page(mask);
3231 if (!page)
3232 return -ENOMEM;
3234 addr = np->ops->map_page(np->device, page, 0,
3235 PAGE_SIZE, DMA_FROM_DEVICE);
3237 niu_hash_page(rp, page, addr);
3238 if (rp->rbr_blocks_per_page > 1)
3239 atomic_add(rp->rbr_blocks_per_page - 1,
3240 &compound_head(page)->_count);
3242 for (i = 0; i < rp->rbr_blocks_per_page; i++) {
3243 __le32 *rbr = &rp->rbr[start_index + i];
3245 *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
3246 addr += rp->rbr_block_size;
3249 return 0;
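/*
 * Refcounting sketch: one page backs rp->rbr_blocks_per_page RBR
 * entries, so its use count is raised by (blocks_per_page - 1) up
 * front; the RX completion path later drops one reference per block
 * handed up or freed, so the page is returned only after its last
 * block has been consumed.
 */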
3252 static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3254 int index = rp->rbr_index;
3256 rp->rbr_pending++;
3257 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
3258 int err = niu_rbr_add_page(np, rp, mask, index);
3260 if (unlikely(err)) {
3261 rp->rbr_pending--;
3262 return;
3265 rp->rbr_index += rp->rbr_blocks_per_page;
3266 BUG_ON(rp->rbr_index > rp->rbr_table_size);
3267 if (rp->rbr_index == rp->rbr_table_size)
3268 rp->rbr_index = 0;
3270 if (rp->rbr_pending >= rp->rbr_kick_thresh) {
3271 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
3272 rp->rbr_pending = 0;
3277 static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
3279 unsigned int index = rp->rcr_index;
3280 int num_rcr = 0;
3282 rp->rx_dropped++;
3283 while (1) {
3284 struct page *page, **link;
3285 u64 addr, val;
3286 u32 rcr_size;
3288 num_rcr++;
3290 val = le64_to_cpup(&rp->rcr[index]);
3291 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3292 RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3293 page = niu_find_rxpage(rp, addr, &link);
3295 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3296 RCR_ENTRY_PKTBUFSZ_SHIFT];
3297 if ((page->index + PAGE_SIZE) - rcr_size == addr) {
3298 *link = (struct page *) page->mapping;
3299 np->ops->unmap_page(np->device, page->index,
3300 PAGE_SIZE, DMA_FROM_DEVICE);
3301 page->index = 0;
3302 page->mapping = NULL;
3303 __free_page(page);
3304 rp->rbr_refill_pending++;
3307 index = NEXT_RCR(rp, index);
3308 if (!(val & RCR_ENTRY_MULTI))
3309 break;
3312 rp->rcr_index = index;
3314 return num_rcr;
3317 static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
3319 unsigned int index = rp->rcr_index;
3320 struct sk_buff *skb;
3321 int len, num_rcr;
3323 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3324 if (unlikely(!skb))
3325 return niu_rx_pkt_ignore(np, rp);
3327 num_rcr = 0;
3328 while (1) {
3329 struct page *page, **link;
3330 u32 rcr_size, append_size;
3331 u64 addr, val, off;
3333 num_rcr++;
3335 val = le64_to_cpup(&rp->rcr[index]);
3337 len = (val & RCR_ENTRY_L2_LEN) >>
3338 RCR_ENTRY_L2_LEN_SHIFT;
3339 len -= ETH_FCS_LEN;
3341 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3342 RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3343 page = niu_find_rxpage(rp, addr, &link);
3345 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3346 RCR_ENTRY_PKTBUFSZ_SHIFT];
3348 off = addr & ~PAGE_MASK;
3349 append_size = rcr_size;
3350 if (num_rcr == 1) {
3351 int ptype;
3353 off += 2;
3354 append_size -= 2;
3356 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3357 if ((ptype == RCR_PKT_TYPE_TCP ||
3358 ptype == RCR_PKT_TYPE_UDP) &&
3359 !(val & (RCR_ENTRY_NOPORT |
3360 RCR_ENTRY_ERROR)))
3361 skb->ip_summed = CHECKSUM_UNNECESSARY;
3362 else
3363 skb->ip_summed = CHECKSUM_NONE;
3365 if (!(val & RCR_ENTRY_MULTI))
3366 append_size = len - skb->len;
3368 niu_rx_skb_append(skb, page, off, append_size);
3369 if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
3370 *link = (struct page *) page->mapping;
3371 np->ops->unmap_page(np->device, page->index,
3372 PAGE_SIZE, DMA_FROM_DEVICE);
3373 page->index = 0;
3374 page->mapping = NULL;
3375 rp->rbr_refill_pending++;
3376 } else
3377 get_page(page);
3379 index = NEXT_RCR(rp, index);
3380 if (!(val & RCR_ENTRY_MULTI))
3381 break;
3384 rp->rcr_index = index;
3386 skb_reserve(skb, NET_IP_ALIGN);
3387 __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
3389 rp->rx_packets++;
3390 rp->rx_bytes += skb->len;
3392 skb->protocol = eth_type_trans(skb, np->dev);
3393 netif_receive_skb(skb);
3395 np->dev->last_rx = jiffies;
3397 return num_rcr;
3400 static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3402 int blocks_per_page = rp->rbr_blocks_per_page;
3403 int err, index = rp->rbr_index;
3405 err = 0;
3406 while (index < (rp->rbr_table_size - blocks_per_page)) {
3407 err = niu_rbr_add_page(np, rp, mask, index);
3408 if (err)
3409 break;
3411 index += blocks_per_page;
3414 rp->rbr_index = index;
3415 return err;
3418 static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3420 int i;
3422 for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
3423 struct page *page;
3425 page = rp->rxhash[i];
3426 while (page) {
3427 struct page *next = (struct page *) page->mapping;
3428 u64 base = page->index;
3430 np->ops->unmap_page(np->device, base, PAGE_SIZE,
3431 DMA_FROM_DEVICE);
3432 page->index = 0;
3433 page->mapping = NULL;
3435 __free_page(page);
3437 page = next;
3441 for (i = 0; i < rp->rbr_table_size; i++)
3442 rp->rbr[i] = cpu_to_le32(0);
3443 rp->rbr_index = 0;
3446 static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3448 struct tx_buff_info *tb = &rp->tx_buffs[idx];
3449 struct sk_buff *skb = tb->skb;
3450 struct tx_pkt_hdr *tp;
3451 u64 tx_flags;
3452 int i, len;
3454 tp = (struct tx_pkt_hdr *) skb->data;
3455 tx_flags = le64_to_cpup(&tp->flags);
3457 rp->tx_packets++;
3458 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
3459 ((tx_flags & TXHDR_PAD) / 2));
3461 len = skb_headlen(skb);
3462 np->ops->unmap_single(np->device, tb->mapping,
3463 len, DMA_TO_DEVICE);
3465 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
3466 rp->mark_pending--;
3468 tb->skb = NULL;
3469 do {
3470 idx = NEXT_TX(rp, idx);
3471 len -= MAX_TX_DESC_LEN;
3472 } while (len > 0);
3474 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3475 tb = &rp->tx_buffs[idx];
3476 BUG_ON(tb->skb != NULL);
3477 np->ops->unmap_page(np->device, tb->mapping,
3478 skb_shinfo(skb)->frags[i].size,
3479 DMA_TO_DEVICE);
3480 idx = NEXT_TX(rp, idx);
3483 dev_kfree_skb(skb);
3485 return idx;
3488 #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
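/* Hysteresis for the TX queue: niu_tx_work() below only wakes a stopped
 * queue once more than a quarter of the ring's descriptors are free
 * again, which avoids rapid stop/wake flapping near the full mark.
 */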
3490 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3492 struct netdev_queue *txq;
3493 u16 pkt_cnt, tmp;
3494 int cons, index;
3495 u64 cs;
3497 index = (rp - np->tx_rings);
3498 txq = netdev_get_tx_queue(np->dev, index);
3500 cs = rp->tx_cs;
3501 if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
3502 goto out;
3504 tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
3505 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
3506 (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
3508 rp->last_pkt_cnt = tmp;
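/* The packet counter in TX_CS is a free-running hardware field that
 * wraps at its bit width, so the number of newly completed packets is
 * the masked difference from the last snapshot.  Purely illustrative:
 * with a 12-bit field, last_pkt_cnt == 4090 and a current reading of 6
 * would give (6 - 4090) & 0xfff == 12 packets to release below.
 */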
3510 cons = rp->cons;
3512 niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
3513 np->dev->name, pkt_cnt, cons);
3515 while (pkt_cnt--)
3516 cons = release_tx_packet(np, rp, cons);
3518 rp->cons = cons;
3519 smp_mb();
3521 out:
3522 if (unlikely(netif_tx_queue_stopped(txq) &&
3523 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
3524 __netif_tx_lock(txq, smp_processor_id());
3525 if (netif_tx_queue_stopped(txq) &&
3526 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
3527 netif_tx_wake_queue(txq);
3528 __netif_tx_unlock(txq);
3532 static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
3534 int qlen, rcr_done = 0, work_done = 0;
3535 struct rxdma_mailbox *mbox = rp->mbox;
3536 u64 stat;
3538 #if 1
3539 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3540 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
3541 #else
3542 stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3543 qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
3544 #endif
3545 mbox->rx_dma_ctl_stat = 0;
3546 mbox->rcrstat_a = 0;
3548 niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
3549 np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
3551 rcr_done = work_done = 0;
3552 qlen = min(qlen, budget);
3553 while (work_done < qlen) {
3554 rcr_done += niu_process_rx_pkt(np, rp);
3555 work_done++;
3558 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
3559 unsigned int i;
3561 for (i = 0; i < rp->rbr_refill_pending; i++)
3562 niu_rbr_refill(np, rp, GFP_ATOMIC);
3563 rp->rbr_refill_pending = 0;
3566 stat = (RX_DMA_CTL_STAT_MEX |
3567 ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
3568 ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
3570 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
3572 return work_done;
3575 static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3577 u64 v0 = lp->v0;
3578 u32 tx_vec = (v0 >> 32);
3579 u32 rx_vec = (v0 & 0xffffffff);
3580 int i, work_done = 0;
3582 niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
3583 np->dev->name, (unsigned long long) v0);
3585 for (i = 0; i < np->num_tx_rings; i++) {
3586 struct tx_ring_info *rp = &np->tx_rings[i];
3587 if (tx_vec & (1 << rp->tx_channel))
3588 niu_tx_work(np, rp);
3589 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
3592 for (i = 0; i < np->num_rx_rings; i++) {
3593 struct rx_ring_info *rp = &np->rx_rings[i];
3595 if (rx_vec & (1 << rp->rx_channel)) {
3596 int this_work_done;
3598 this_work_done = niu_rx_work(np, rp,
3599 budget);
3601 budget -= this_work_done;
3602 work_done += this_work_done;
3604 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
3607 return work_done;
3610 static int niu_poll(struct napi_struct *napi, int budget)
3612 struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3613 struct niu *np = lp->np;
3614 int work_done;
3616 work_done = niu_poll_core(np, lp, budget);
3618 if (work_done < budget) {
3619 netif_rx_complete(np->dev, napi);
3620 niu_ldg_rearm(np, lp, 1);
3622 return work_done;
3625 static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3626 u64 stat)
3628 dev_err(np->device, PFX "%s: RX channel %u errors ( ",
3629 np->dev->name, rp->rx_channel);
3631 if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3632 printk("RBR_TMOUT ");
3633 if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3634 printk("RSP_CNT ");
3635 if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3636 printk("BYTE_EN_BUS ");
3637 if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3638 printk("RSP_DAT ");
3639 if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3640 printk("RCR_ACK ");
3641 if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3642 printk("RCR_SHA_PAR ");
3643 if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3644 printk("RBR_PRE_PAR ");
3645 if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3646 printk("CONFIG ");
3647 if (stat & RX_DMA_CTL_STAT_RCRINCON)
3648 printk("RCRINCON ");
3649 if (stat & RX_DMA_CTL_STAT_RCRFULL)
3650 printk("RCRFULL ");
3651 if (stat & RX_DMA_CTL_STAT_RBRFULL)
3652 printk("RBRFULL ");
3653 if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3654 printk("RBRLOGPAGE ");
3655 if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3656 printk("CFIGLOGPAGE ");
3657 if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3658 printk("DC_FIDO ");
3660 printk(")\n");
3663 static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3665 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3666 int err = 0;
3669 if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
3670 RX_DMA_CTL_STAT_PORT_FATAL))
3671 err = -EINVAL;
3673 if (err) {
3674 dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
3675 np->dev->name, rp->rx_channel,
3676 (unsigned long long) stat);
3678 niu_log_rxchan_errors(np, rp, stat);
3681 nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3682 stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
3684 return err;
3687 static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3688 u64 cs)
3690 dev_err(np->device, PFX "%s: TX channel %u errors ( ",
3691 np->dev->name, rp->tx_channel);
3693 if (cs & TX_CS_MBOX_ERR)
3694 printk("MBOX ");
3695 if (cs & TX_CS_PKT_SIZE_ERR)
3696 printk("PKT_SIZE ");
3697 if (cs & TX_CS_TX_RING_OFLOW)
3698 printk("TX_RING_OFLOW ");
3699 if (cs & TX_CS_PREF_BUF_PAR_ERR)
3700 printk("PREF_BUF_PAR ");
3701 if (cs & TX_CS_NACK_PREF)
3702 printk("NACK_PREF ");
3703 if (cs & TX_CS_NACK_PKT_RD)
3704 printk("NACK_PKT_RD ");
3705 if (cs & TX_CS_CONF_PART_ERR)
3706 printk("CONF_PART ");
3707 if (cs & TX_CS_PKT_PRT_ERR)
3708 printk("PKT_PTR ");
3710 printk(")\n");
3713 static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3715 u64 cs, logh, logl;
3717 cs = nr64(TX_CS(rp->tx_channel));
3718 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3719 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3721 dev_err(np->device, PFX "%s: TX channel %u error, "
3722 "cs[%llx] logh[%llx] logl[%llx]\n",
3723 np->dev->name, rp->tx_channel,
3724 (unsigned long long) cs,
3725 (unsigned long long) logh,
3726 (unsigned long long) logl);
3728 niu_log_txchan_errors(np, rp, cs);
3730 return -ENODEV;
3733 static int niu_mif_interrupt(struct niu *np)
3735 u64 mif_status = nr64(MIF_STATUS);
3736 int phy_mdint = 0;
3738 if (np->flags & NIU_FLAGS_XMAC) {
3739 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3741 if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3742 phy_mdint = 1;
3745 dev_err(np->device, PFX "%s: MIF interrupt, "
3746 "stat[%llx] phy_mdint(%d)\n",
3747 np->dev->name, (unsigned long long) mif_status, phy_mdint);
3749 return -ENODEV;
3752 static void niu_xmac_interrupt(struct niu *np)
3754 struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3755 u64 val;
3757 val = nr64_mac(XTXMAC_STATUS);
3758 if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3759 mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3760 if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3761 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3762 if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3763 mp->tx_fifo_errors++;
3764 if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3765 mp->tx_overflow_errors++;
3766 if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3767 mp->tx_max_pkt_size_errors++;
3768 if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3769 mp->tx_underflow_errors++;
3771 val = nr64_mac(XRXMAC_STATUS);
3772 if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3773 mp->rx_local_faults++;
3774 if (val & XRXMAC_STATUS_RFLT_DET)
3775 mp->rx_remote_faults++;
3776 if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3777 mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3778 if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3779 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3780 if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3781 mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3782 if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3783 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3784 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3785 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3788 if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
3789 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
3790 if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
3791 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
3792 if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
3793 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
3794 if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
3795 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
3796 if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
3797 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
3798 if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
3799 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
3800 if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
3801 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
3802 if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP)
3803 mp->rx_octets += RXMAC_BT_CNT_COUNT;
3804 if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
3805 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
3806 if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
3807 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
3808 if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
3809 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
3810 if (val & XRXMAC_STATUS_RXUFLOW)
3811 mp->rx_underflows++;
3812 if (val & XRXMAC_STATUS_RXOFLOW)
3813 mp->rx_overflows++;
3815 val = nr64_mac(XMAC_FC_STAT);
3816 if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
3817 mp->pause_off_state++;
3818 if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
3819 mp->pause_on_state++;
3820 if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
3821 mp->pause_received++;
3824 static void niu_bmac_interrupt(struct niu *np)
3826 struct niu_bmac_stats *mp = &np->mac_stats.bmac;
3827 u64 val;
3829 val = nr64_mac(BTXMAC_STATUS);
3830 if (val & BTXMAC_STATUS_UNDERRUN)
3831 mp->tx_underflow_errors++;
3832 if (val & BTXMAC_STATUS_MAX_PKT_ERR)
3833 mp->tx_max_pkt_size_errors++;
3834 if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
3835 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
3836 if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
3837 mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
3839 val = nr64_mac(BRXMAC_STATUS);
3840 if (val & BRXMAC_STATUS_OVERFLOW)
3841 mp->rx_overflows++;
3842 if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
3843 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
3844 if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
3845 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3846 if (val & BRXMAC_STATUS_CRC_ERR_EXP)
3847 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3848 if (val & BRXMAC_STATUS_LEN_ERR_EXP)
3849 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
3851 val = nr64_mac(BMAC_CTRL_STATUS);
3852 if (val & BMAC_CTRL_STATUS_NOPAUSE)
3853 mp->pause_off_state++;
3854 if (val & BMAC_CTRL_STATUS_PAUSE)
3855 mp->pause_on_state++;
3856 if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
3857 mp->pause_received++;
3860 static int niu_mac_interrupt(struct niu *np)
3862 if (np->flags & NIU_FLAGS_XMAC)
3863 niu_xmac_interrupt(np);
3864 else
3865 niu_bmac_interrupt(np);
3867 return 0;
3870 static void niu_log_device_error(struct niu *np, u64 stat)
3872 dev_err(np->device, PFX "%s: Core device errors ( ",
3873 np->dev->name);
3875 if (stat & SYS_ERR_MASK_META2)
3876 printk("META2 ");
3877 if (stat & SYS_ERR_MASK_META1)
3878 printk("META1 ");
3879 if (stat & SYS_ERR_MASK_PEU)
3880 printk("PEU ");
3881 if (stat & SYS_ERR_MASK_TXC)
3882 printk("TXC ");
3883 if (stat & SYS_ERR_MASK_RDMC)
3884 printk("RDMC ");
3885 if (stat & SYS_ERR_MASK_TDMC)
3886 printk("TDMC ");
3887 if (stat & SYS_ERR_MASK_ZCP)
3888 printk("ZCP ");
3889 if (stat & SYS_ERR_MASK_FFLP)
3890 printk("FFLP ");
3891 if (stat & SYS_ERR_MASK_IPP)
3892 printk("IPP ");
3893 if (stat & SYS_ERR_MASK_MAC)
3894 printk("MAC ");
3895 if (stat & SYS_ERR_MASK_SMX)
3896 printk("SMX ");
3898 printk(")\n");
3901 static int niu_device_error(struct niu *np)
3903 u64 stat = nr64(SYS_ERR_STAT);
3905 dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
3906 np->dev->name, (unsigned long long) stat);
3908 niu_log_device_error(np, stat);
3910 return -ENODEV;
3913 static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
3914 u64 v0, u64 v1, u64 v2)
3917 int i, err = 0;
3919 lp->v0 = v0;
3920 lp->v1 = v1;
3921 lp->v2 = v2;
3923 if (v1 & 0x00000000ffffffffULL) {
3924 u32 rx_vec = (v1 & 0xffffffff);
3926 for (i = 0; i < np->num_rx_rings; i++) {
3927 struct rx_ring_info *rp = &np->rx_rings[i];
3929 if (rx_vec & (1 << rp->rx_channel)) {
3930 int r = niu_rx_error(np, rp);
3931 if (r) {
3932 err = r;
3933 } else {
3934 if (!v0)
3935 nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3936 RX_DMA_CTL_STAT_MEX);
3941 if (v1 & 0x7fffffff00000000ULL) {
3942 u32 tx_vec = (v1 >> 32) & 0x7fffffff;
3944 for (i = 0; i < np->num_tx_rings; i++) {
3945 struct tx_ring_info *rp = &np->tx_rings[i];
3947 if (tx_vec & (1 << rp->tx_channel)) {
3948 int r = niu_tx_error(np, rp);
3949 if (r)
3950 err = r;
3954 if ((v0 | v1) & 0x8000000000000000ULL) {
3955 int r = niu_mif_interrupt(np);
3956 if (r)
3957 err = r;
3959 if (v2) {
3960 if (v2 & 0x01ef) {
3961 int r = niu_mac_interrupt(np);
3962 if (r)
3963 err = r;
3965 if (v2 & 0x0210) {
3966 int r = niu_device_error(np);
3967 if (r)
3968 err = r;
3972 if (err)
3973 niu_enable_interrupts(np, 0);
3975 return err;
3978 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
3979 int ldn)
3981 struct rxdma_mailbox *mbox = rp->mbox;
3982 u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3984 stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
3985 RX_DMA_CTL_STAT_RCRTO);
3986 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
3988 niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
3989 np->dev->name, (unsigned long long) stat);
3992 static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
3993 int ldn)
3995 rp->tx_cs = nr64(TX_CS(rp->tx_channel));
3997 niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
3998 np->dev->name, (unsigned long long) rp->tx_cs);
4001 static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4003 struct niu_parent *parent = np->parent;
4004 u32 rx_vec, tx_vec;
4005 int i;
4007 tx_vec = (v0 >> 32);
4008 rx_vec = (v0 & 0xffffffff);
4010 for (i = 0; i < np->num_rx_rings; i++) {
4011 struct rx_ring_info *rp = &np->rx_rings[i];
4012 int ldn = LDN_RXDMA(rp->rx_channel);
4014 if (parent->ldg_map[ldn] != ldg)
4015 continue;
4017 nw64(LD_IM0(ldn), LD_IM0_MASK);
4018 if (rx_vec & (1 << rp->rx_channel))
4019 niu_rxchan_intr(np, rp, ldn);
4022 for (i = 0; i < np->num_tx_rings; i++) {
4023 struct tx_ring_info *rp = &np->tx_rings[i];
4024 int ldn = LDN_TXDMA(rp->tx_channel);
4026 if (parent->ldg_map[ldn] != ldg)
4027 continue;
4029 nw64(LD_IM0(ldn), LD_IM0_MASK);
4030 if (tx_vec & (1 << rp->tx_channel))
4031 niu_txchan_intr(np, rp, ldn);
4035 static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4036 u64 v0, u64 v1, u64 v2)
4038 if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) {
4039 lp->v0 = v0;
4040 lp->v1 = v1;
4041 lp->v2 = v2;
4042 __niu_fastpath_interrupt(np, lp->ldg_num, v0);
4043 __netif_rx_schedule(np->dev, &lp->napi);
4047 static irqreturn_t niu_interrupt(int irq, void *dev_id)
4049 struct niu_ldg *lp = dev_id;
4050 struct niu *np = lp->np;
4051 int ldg = lp->ldg_num;
4052 unsigned long flags;
4053 u64 v0, v1, v2;
4055 if (netif_msg_intr(np))
4056 printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
4057 lp, ldg);
4059 spin_lock_irqsave(&np->lock, flags);
4061 v0 = nr64(LDSV0(ldg));
4062 v1 = nr64(LDSV1(ldg));
4063 v2 = nr64(LDSV2(ldg));
4065 if (netif_msg_intr(np))
4066 printk("v0[%llx] v1[%llx] v2[%llx]\n",
4067 (unsigned long long) v0,
4068 (unsigned long long) v1,
4069 (unsigned long long) v2);
4071 if (unlikely(!v0 && !v1 && !v2)) {
4072 spin_unlock_irqrestore(&np->lock, flags);
4073 return IRQ_NONE;
4076 if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
4077 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
4078 if (err)
4079 goto out;
4081 if (likely(v0 & ~((u64)1 << LDN_MIF)))
4082 niu_schedule_napi(np, lp, v0, v1, v2);
4083 else
4084 niu_ldg_rearm(np, lp, 1);
4085 out:
4086 spin_unlock_irqrestore(&np->lock, flags);
4088 return IRQ_HANDLED;
4091 static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
4093 if (rp->mbox) {
4094 np->ops->free_coherent(np->device,
4095 sizeof(struct rxdma_mailbox),
4096 rp->mbox, rp->mbox_dma);
4097 rp->mbox = NULL;
4099 if (rp->rcr) {
4100 np->ops->free_coherent(np->device,
4101 MAX_RCR_RING_SIZE * sizeof(__le64),
4102 rp->rcr, rp->rcr_dma);
4103 rp->rcr = NULL;
4104 rp->rcr_table_size = 0;
4105 rp->rcr_index = 0;
4107 if (rp->rbr) {
4108 niu_rbr_free(np, rp);
4110 np->ops->free_coherent(np->device,
4111 MAX_RBR_RING_SIZE * sizeof(__le32),
4112 rp->rbr, rp->rbr_dma);
4113 rp->rbr = NULL;
4114 rp->rbr_table_size = 0;
4115 rp->rbr_index = 0;
4117 kfree(rp->rxhash);
4118 rp->rxhash = NULL;
4121 static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
4123 if (rp->mbox) {
4124 np->ops->free_coherent(np->device,
4125 sizeof(struct txdma_mailbox),
4126 rp->mbox, rp->mbox_dma);
4127 rp->mbox = NULL;
4129 if (rp->descr) {
4130 int i;
4132 for (i = 0; i < MAX_TX_RING_SIZE; i++) {
4133 if (rp->tx_buffs[i].skb)
4134 (void) release_tx_packet(np, rp, i);
4137 np->ops->free_coherent(np->device,
4138 MAX_TX_RING_SIZE * sizeof(__le64),
4139 rp->descr, rp->descr_dma);
4140 rp->descr = NULL;
4141 rp->pending = 0;
4142 rp->prod = 0;
4143 rp->cons = 0;
4144 rp->wrap_bit = 0;
4148 static void niu_free_channels(struct niu *np)
4150 int i;
4152 if (np->rx_rings) {
4153 for (i = 0; i < np->num_rx_rings; i++) {
4154 struct rx_ring_info *rp = &np->rx_rings[i];
4156 niu_free_rx_ring_info(np, rp);
4158 kfree(np->rx_rings);
4159 np->rx_rings = NULL;
4160 np->num_rx_rings = 0;
4163 if (np->tx_rings) {
4164 for (i = 0; i < np->num_tx_rings; i++) {
4165 struct tx_ring_info *rp = &np->tx_rings[i];
4167 niu_free_tx_ring_info(np, rp);
4169 kfree(np->tx_rings);
4170 np->tx_rings = NULL;
4171 np->num_tx_rings = 0;
4175 static int niu_alloc_rx_ring_info(struct niu *np,
4176 struct rx_ring_info *rp)
4178 BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
4180 rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
4181 GFP_KERNEL);
4182 if (!rp->rxhash)
4183 return -ENOMEM;
4185 rp->mbox = np->ops->alloc_coherent(np->device,
4186 sizeof(struct rxdma_mailbox),
4187 &rp->mbox_dma, GFP_KERNEL);
4188 if (!rp->mbox)
4189 return -ENOMEM;
4190 if ((unsigned long)rp->mbox & (64UL - 1)) {
4191 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4192 "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
4193 return -EINVAL;
4196 rp->rcr = np->ops->alloc_coherent(np->device,
4197 MAX_RCR_RING_SIZE * sizeof(__le64),
4198 &rp->rcr_dma, GFP_KERNEL);
4199 if (!rp->rcr)
4200 return -ENOMEM;
4201 if ((unsigned long)rp->rcr & (64UL - 1)) {
4202 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4203 "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
4204 return -EINVAL;
4206 rp->rcr_table_size = MAX_RCR_RING_SIZE;
4207 rp->rcr_index = 0;
4209 rp->rbr = np->ops->alloc_coherent(np->device,
4210 MAX_RBR_RING_SIZE * sizeof(__le32),
4211 &rp->rbr_dma, GFP_KERNEL);
4212 if (!rp->rbr)
4213 return -ENOMEM;
4214 if ((unsigned long)rp->rbr & (64UL - 1)) {
4215 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4216 "RXDMA RBR table %p\n", np->dev->name, rp->rbr);
4217 return -EINVAL;
4219 rp->rbr_table_size = MAX_RBR_RING_SIZE;
4220 rp->rbr_index = 0;
4221 rp->rbr_pending = 0;
4223 return 0;
4226 static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
4228 int mtu = np->dev->mtu;
4230 /* These values are recommended by the HW designers for fair
4231 * utilization of DRR amongst the rings.
4233 rp->max_burst = mtu + 32;
4234 if (rp->max_burst > 4096)
4235 rp->max_burst = 4096;
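/* e.g. a standard 1500-byte MTU gives a burst limit of 1532 bytes,
 * while a jumbo 9000-byte MTU is clamped to the 4096-byte ceiling.
 */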
4238 static int niu_alloc_tx_ring_info(struct niu *np,
4239 struct tx_ring_info *rp)
4241 BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
4243 rp->mbox = np->ops->alloc_coherent(np->device,
4244 sizeof(struct txdma_mailbox),
4245 &rp->mbox_dma, GFP_KERNEL);
4246 if (!rp->mbox)
4247 return -ENOMEM;
4248 if ((unsigned long)rp->mbox & (64UL - 1)) {
4249 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4250 "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
4251 return -EINVAL;
4254 rp->descr = np->ops->alloc_coherent(np->device,
4255 MAX_TX_RING_SIZE * sizeof(__le64),
4256 &rp->descr_dma, GFP_KERNEL);
4257 if (!rp->descr)
4258 return -ENOMEM;
4259 if ((unsigned long)rp->descr & (64UL - 1)) {
4260 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4261 "TXDMA descr table %p\n", np->dev->name, rp->descr);
4262 return -EINVAL;
4265 rp->pending = MAX_TX_RING_SIZE;
4266 rp->prod = 0;
4267 rp->cons = 0;
4268 rp->wrap_bit = 0;
4270 /* XXX make these configurable... XXX */
4271 rp->mark_freq = rp->pending / 4;
4273 niu_set_max_burst(np, rp);
4275 return 0;
4278 static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4280 u16 bss;
4282 bss = min(PAGE_SHIFT, 15);
4284 rp->rbr_block_size = 1 << bss;
4285 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
4287 rp->rbr_sizes[0] = 256;
4288 rp->rbr_sizes[1] = 1024;
4289 if (np->dev->mtu > ETH_DATA_LEN) {
4290 switch (PAGE_SIZE) {
4291 case 4 * 1024:
4292 rp->rbr_sizes[2] = 4096;
4293 break;
4295 default:
4296 rp->rbr_sizes[2] = 8192;
4297 break;
4299 } else {
4300 rp->rbr_sizes[2] = 2048;
4302 rp->rbr_sizes[3] = rp->rbr_block_size;
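/* On a 4 KB-page system this works out to one RBR block per page and
 * buffer sizes of 256, 1024, 2048 (or 4096 when the MTU exceeds
 * ETH_DATA_LEN) and 4096 bytes.
 */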
4305 static int niu_alloc_channels(struct niu *np)
4307 struct niu_parent *parent = np->parent;
4308 int first_rx_channel, first_tx_channel;
4309 int i, port, err;
4311 port = np->port;
4312 first_rx_channel = first_tx_channel = 0;
4313 for (i = 0; i < port; i++) {
4314 first_rx_channel += parent->rxchan_per_port[i];
4315 first_tx_channel += parent->txchan_per_port[i];
4318 np->num_rx_rings = parent->rxchan_per_port[port];
4319 np->num_tx_rings = parent->txchan_per_port[port];
4321 np->dev->real_num_tx_queues = np->num_tx_rings;
4323 np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
4324 GFP_KERNEL);
4325 err = -ENOMEM;
4326 if (!np->rx_rings)
4327 goto out_err;
4329 for (i = 0; i < np->num_rx_rings; i++) {
4330 struct rx_ring_info *rp = &np->rx_rings[i];
4332 rp->np = np;
4333 rp->rx_channel = first_rx_channel + i;
4335 err = niu_alloc_rx_ring_info(np, rp);
4336 if (err)
4337 goto out_err;
4339 niu_size_rbr(np, rp);
4341 /* XXX better defaults, configurable, etc... XXX */
4342 rp->nonsyn_window = 64;
4343 rp->nonsyn_threshold = rp->rcr_table_size - 64;
4344 rp->syn_window = 64;
4345 rp->syn_threshold = rp->rcr_table_size - 64;
4346 rp->rcr_pkt_threshold = 16;
4347 rp->rcr_timeout = 8;
4348 rp->rbr_kick_thresh = RBR_REFILL_MIN;
4349 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4350 rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4352 err = niu_rbr_fill(np, rp, GFP_KERNEL);
4353 if (err)
4354 return err;
4357 np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
4358 GFP_KERNEL);
4359 err = -ENOMEM;
4360 if (!np->tx_rings)
4361 goto out_err;
4363 for (i = 0; i < np->num_tx_rings; i++) {
4364 struct tx_ring_info *rp = &np->tx_rings[i];
4366 rp->np = np;
4367 rp->tx_channel = first_tx_channel + i;
4369 err = niu_alloc_tx_ring_info(np, rp);
4370 if (err)
4371 goto out_err;
4374 return 0;
4376 out_err:
4377 niu_free_channels(np);
4378 return err;
4381 static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4383 int limit = 1000;
4385 while (--limit > 0) {
4386 u64 val = nr64(TX_CS(channel));
4387 if (val & TX_CS_SNG_STATE)
4388 return 0;
4390 return -ENODEV;
4393 static int niu_tx_channel_stop(struct niu *np, int channel)
4395 u64 val = nr64(TX_CS(channel));
4397 val |= TX_CS_STOP_N_GO;
4398 nw64(TX_CS(channel), val);
4400 return niu_tx_cs_sng_poll(np, channel);
4403 static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4405 int limit = 1000;
4407 while (--limit > 0) {
4408 u64 val = nr64(TX_CS(channel));
4409 if (!(val & TX_CS_RST))
4410 return 0;
4412 return -ENODEV;
4415 static int niu_tx_channel_reset(struct niu *np, int channel)
4417 u64 val = nr64(TX_CS(channel));
4418 int err;
4420 val |= TX_CS_RST;
4421 nw64(TX_CS(channel), val);
4423 err = niu_tx_cs_reset_poll(np, channel);
4424 if (!err)
4425 nw64(TX_RING_KICK(channel), 0);
4427 return err;
4430 static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4432 u64 val;
4434 nw64(TX_LOG_MASK1(channel), 0);
4435 nw64(TX_LOG_VAL1(channel), 0);
4436 nw64(TX_LOG_MASK2(channel), 0);
4437 nw64(TX_LOG_VAL2(channel), 0);
4438 nw64(TX_LOG_PAGE_RELO1(channel), 0);
4439 nw64(TX_LOG_PAGE_RELO2(channel), 0);
4440 nw64(TX_LOG_PAGE_HDL(channel), 0);
4442 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4443 val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
4444 nw64(TX_LOG_PAGE_VLD(channel), val);
4446 /* XXX TXDMA 32bit mode? XXX */
4448 return 0;
4451 static void niu_txc_enable_port(struct niu *np, int on)
4453 unsigned long flags;
4454 u64 val, mask;
4456 niu_lock_parent(np, flags);
4457 val = nr64(TXC_CONTROL);
4458 mask = (u64)1 << np->port;
4459 if (on) {
4460 val |= TXC_CONTROL_ENABLE | mask;
4461 } else {
4462 val &= ~mask;
4463 if ((val & ~TXC_CONTROL_ENABLE) == 0)
4464 val &= ~TXC_CONTROL_ENABLE;
4466 nw64(TXC_CONTROL, val);
4467 niu_unlock_parent(np, flags);
4470 static void niu_txc_set_imask(struct niu *np, u64 imask)
4472 unsigned long flags;
4473 u64 val;
4475 niu_lock_parent(np, flags);
4476 val = nr64(TXC_INT_MASK);
4477 val &= ~TXC_INT_MASK_VAL(np->port);
4478 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
4479 niu_unlock_parent(np, flags);
4482 static void niu_txc_port_dma_enable(struct niu *np, int on)
4484 u64 val = 0;
4486 if (on) {
4487 int i;
4489 for (i = 0; i < np->num_tx_rings; i++)
4490 val |= (1 << np->tx_rings[i].tx_channel);
4492 nw64(TXC_PORT_DMA(np->port), val);
4495 static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4497 int err, channel = rp->tx_channel;
4498 u64 val, ring_len;
4500 err = niu_tx_channel_stop(np, channel);
4501 if (err)
4502 return err;
4504 err = niu_tx_channel_reset(np, channel);
4505 if (err)
4506 return err;
4508 err = niu_tx_channel_lpage_init(np, channel);
4509 if (err)
4510 return err;
4512 nw64(TXC_DMA_MAX(channel), rp->max_burst);
4513 nw64(TX_ENT_MSK(channel), 0);
4515 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4516 TX_RNG_CFIG_STADDR)) {
4517 dev_err(np->device, PFX "%s: TX ring channel %d "
4518 "DMA addr (%llx) is not aligned.\n",
4519 np->dev->name, channel,
4520 (unsigned long long) rp->descr_dma);
4521 return -EINVAL;
4524 /* The length field in TX_RNG_CFIG is measured in 64-byte
4525 * blocks. rp->pending is the number of TX descriptors in
4526 * our ring, each 8 bytes long, so dividing the descriptor
4527 * count by 8 gives the block count the chip expects.
4529 ring_len = (rp->pending / 8);
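/* Worked example: a ring of 256 descriptors occupies 256 * 8 = 2048
 * bytes, i.e. 2048 / 64 = 32 of the 64-byte blocks counted by the LEN
 * field, which is exactly rp->pending / 8.
 */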
4531 val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4532 rp->descr_dma);
4533 nw64(TX_RNG_CFIG(channel), val);
4535 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4536 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4537 dev_err(np->device, PFX "%s: TX ring channel %d "
4538 "MBOX addr (%llx) is has illegal bits.\n",
4539 np->dev->name, channel,
4540 (unsigned long long) rp->mbox_dma);
4541 return -EINVAL;
4543 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4544 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4546 nw64(TX_CS(channel), 0);
4548 rp->last_pkt_cnt = 0;
4550 return 0;
4553 static void niu_init_rdc_groups(struct niu *np)
4555 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4556 int i, first_table_num = tp->first_table_num;
4558 for (i = 0; i < tp->num_tables; i++) {
4559 struct rdc_table *tbl = &tp->tables[i];
4560 int this_table = first_table_num + i;
4561 int slot;
4563 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4564 nw64(RDC_TBL(this_table, slot),
4565 tbl->rxdma_channel[slot]);
4568 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4571 static void niu_init_drr_weight(struct niu *np)
4573 int type = phy_decode(np->parent->port_phy, np->port);
4574 u64 val;
4576 switch (type) {
4577 case PORT_TYPE_10G:
4578 val = PT_DRR_WEIGHT_DEFAULT_10G;
4579 break;
4581 case PORT_TYPE_1G:
4582 default:
4583 val = PT_DRR_WEIGHT_DEFAULT_1G;
4584 break;
4586 nw64(PT_DRR_WT(np->port), val);
4589 static int niu_init_hostinfo(struct niu *np)
4591 struct niu_parent *parent = np->parent;
4592 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4593 int i, err, num_alt = niu_num_alt_addr(np);
4594 int first_rdc_table = tp->first_table_num;
4596 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4597 if (err)
4598 return err;
4600 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4601 if (err)
4602 return err;
4604 for (i = 0; i < num_alt; i++) {
4605 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4606 if (err)
4607 return err;
4610 return 0;
4613 static int niu_rx_channel_reset(struct niu *np, int channel)
4615 return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4616 RXDMA_CFIG1_RST, 1000, 10,
4617 "RXDMA_CFIG1");
4620 static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4622 u64 val;
4624 nw64(RX_LOG_MASK1(channel), 0);
4625 nw64(RX_LOG_VAL1(channel), 0);
4626 nw64(RX_LOG_MASK2(channel), 0);
4627 nw64(RX_LOG_VAL2(channel), 0);
4628 nw64(RX_LOG_PAGE_RELO1(channel), 0);
4629 nw64(RX_LOG_PAGE_RELO2(channel), 0);
4630 nw64(RX_LOG_PAGE_HDL(channel), 0);
4632 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4633 val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
4634 nw64(RX_LOG_PAGE_VLD(channel), val);
4636 return 0;
4639 static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4641 u64 val;
4643 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4644 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4645 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4646 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4647 nw64(RDC_RED_PARA(rp->rx_channel), val);
4650 static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4652 u64 val = 0;
4654 switch (rp->rbr_block_size) {
4655 case 4 * 1024:
4656 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4657 break;
4658 case 8 * 1024:
4659 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4660 break;
4661 case 16 * 1024:
4662 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4663 break;
4664 case 32 * 1024:
4665 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4666 break;
4667 default:
4668 return -EINVAL;
4670 val |= RBR_CFIG_B_VLD2;
4671 switch (rp->rbr_sizes[2]) {
4672 case 2 * 1024:
4673 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4674 break;
4675 case 4 * 1024:
4676 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4677 break;
4678 case 8 * 1024:
4679 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4680 break;
4681 case 16 * 1024:
4682 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4683 break;
4685 default:
4686 return -EINVAL;
4688 val |= RBR_CFIG_B_VLD1;
4689 switch (rp->rbr_sizes[1]) {
4690 case 1 * 1024:
4691 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4692 break;
4693 case 2 * 1024:
4694 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4695 break;
4696 case 4 * 1024:
4697 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4698 break;
4699 case 8 * 1024:
4700 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4701 break;
4703 default:
4704 return -EINVAL;
4706 val |= RBR_CFIG_B_VLD0;
4707 switch (rp->rbr_sizes[0]) {
4708 case 256:
4709 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4710 break;
4711 case 512:
4712 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4713 break;
4714 case 1 * 1024:
4715 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4716 break;
4717 case 2 * 1024:
4718 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4719 break;
4721 default:
4722 return -EINVAL;
4725 *ret = val;
4726 return 0;
4729 static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4731 u64 val = nr64(RXDMA_CFIG1(channel));
4732 int limit;
4734 if (on)
4735 val |= RXDMA_CFIG1_EN;
4736 else
4737 val &= ~RXDMA_CFIG1_EN;
4738 nw64(RXDMA_CFIG1(channel), val);
4740 limit = 1000;
4741 while (--limit > 0) {
4742 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4743 break;
4744 udelay(10);
4746 if (limit <= 0)
4747 return -ENODEV;
4748 return 0;
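/* Bring up a single RX DMA channel: reset it, clear the logical-page
 * registers, program the WRED parameters, point CFIG1/CFIG2 at the
 * mailbox, program the RBR and RCR ring bases, sizes and the RCR
 * threshold/timeout, enable the channel, then kick the RBR index.
 */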
4751 static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4753 int err, channel = rp->rx_channel;
4754 u64 val;
4756 err = niu_rx_channel_reset(np, channel);
4757 if (err)
4758 return err;
4760 err = niu_rx_channel_lpage_init(np, channel);
4761 if (err)
4762 return err;
4764 niu_rx_channel_wred_init(np, rp);
4766 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
4767 nw64(RX_DMA_CTL_STAT(channel),
4768 (RX_DMA_CTL_STAT_MEX |
4769 RX_DMA_CTL_STAT_RCRTHRES |
4770 RX_DMA_CTL_STAT_RCRTO |
4771 RX_DMA_CTL_STAT_RBR_EMPTY));
4772 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4773 nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
4774 nw64(RBR_CFIG_A(channel),
4775 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4776 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
4777 err = niu_compute_rbr_cfig_b(rp, &val);
4778 if (err)
4779 return err;
4780 nw64(RBR_CFIG_B(channel), val);
4781 nw64(RCRCFIG_A(channel),
4782 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
4783 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
4784 nw64(RCRCFIG_B(channel),
4785 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
4786 RCRCFIG_B_ENTOUT |
4787 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
4789 err = niu_enable_rx_channel(np, channel, 1);
4790 if (err)
4791 return err;
4793 nw64(RBR_KICK(channel), rp->rbr_index);
4795 val = nr64(RX_DMA_CTL_STAT(channel));
4796 val |= RX_DMA_CTL_STAT_RBR_EMPTY;
4797 nw64(RX_DMA_CTL_STAT(channel), val);
4799 return 0;
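/* Global RX initialization: program the RXDMA clock divider and seed
 * the RED random generator under the parent lock, set up the RDC
 * groups, DRR weights and host-info entries, then initialize every
 * RX ring in turn.
 */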
4802 static int niu_init_rx_channels(struct niu *np)
4804 unsigned long flags;
4805 u64 seed = jiffies_64;
4806 int err, i;
4808 niu_lock_parent(np, flags);
4809 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
4810 nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
4811 niu_unlock_parent(np, flags);
4813 /* XXX RXDMA 32bit mode? XXX */
4815 niu_init_rdc_groups(np);
4816 niu_init_drr_weight(np);
4818 err = niu_init_hostinfo(np);
4819 if (err)
4820 return err;
4822 for (i = 0; i < np->num_rx_rings; i++) {
4823 struct rx_ring_info *rp = &np->rx_rings[i];
4825 err = niu_init_one_rx_channel(np, rp);
4826 if (err)
4827 return err;
4830 return 0;
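/* Install a TCAM entry matching packets that carry no L4 port
 * information (IP fragments) and steer them via offset zero of the
 * associated RDC table.
 */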
4833 static int niu_set_ip_frag_rule(struct niu *np)
4835 struct niu_parent *parent = np->parent;
4836 struct niu_classifier *cp = &np->clas;
4837 struct niu_tcam_entry *tp;
4838 int index, err;
4840 /* XXX fix this allocation scheme XXX */
4841 index = cp->tcam_index;
4842 tp = &parent->tcam[index];
4844 /* Note that the noport bit is the same in both ipv4 and
4845 * ipv6 format TCAM entries. */
4847 memset(tp, 0, sizeof(*tp));
4848 tp->key[1] = TCAM_V4KEY1_NOPORT;
4849 tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
4850 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
4851 ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
4852 err = tcam_write(np, index, tp->key, tp->key_mask);
4853 if (err)
4854 return err;
4855 err = tcam_assoc_write(np, index, tp->assoc_data);
4856 if (err)
4857 return err;
4859 return 0;
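/* Program the classifier: hash polynomials, host-info entries, the
 * VLAN->RDC and alternate-MAC->RDC mappings, the per-class TCAM and
 * flow keys, and the IP fragment rule, then enable TCAM lookups.
 */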
4862 static int niu_init_classifier_hw(struct niu *np)
4864 struct niu_parent *parent = np->parent;
4865 struct niu_classifier *cp = &np->clas;
4866 int i, err;
4868 nw64(H1POLY, cp->h1_init);
4869 nw64(H2POLY, cp->h2_init);
4871 err = niu_init_hostinfo(np);
4872 if (err)
4873 return err;
4875 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
4876 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
4878 vlan_tbl_write(np, i, np->port,
4879 vp->vlan_pref, vp->rdc_num);
4882 for (i = 0; i < cp->num_alt_mac_mappings; i++) {
4883 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
4885 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
4886 ap->rdc_num, ap->mac_pref);
4887 if (err)
4888 return err;
4891 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
4892 int index = i - CLASS_CODE_USER_PROG1;
4894 err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
4895 if (err)
4896 return err;
4897 err = niu_set_flow_key(np, i, parent->flow_key[index]);
4898 if (err)
4899 return err;
4902 err = niu_set_ip_frag_rule(np);
4903 if (err)
4904 return err;
4906 tcam_enable(np, 1);
4908 return 0;
4911 static int niu_zcp_write(struct niu *np, int index, u64 *data)
4913 nw64(ZCP_RAM_DATA0, data[0]);
4914 nw64(ZCP_RAM_DATA1, data[1]);
4915 nw64(ZCP_RAM_DATA2, data[2]);
4916 nw64(ZCP_RAM_DATA3, data[3]);
4917 nw64(ZCP_RAM_DATA4, data[4]);
4918 nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
4919 nw64(ZCP_RAM_ACC,
4920 (ZCP_RAM_ACC_WRITE |
4921 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
4922 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
4924 return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4925 1000, 100);
4928 static int niu_zcp_read(struct niu *np, int index, u64 *data)
4930 int err;
4932 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4933 1000, 100);
4934 if (err) {
4935 dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
4936 "ZCP_RAM_ACC[%llx]\n", np->dev->name,
4937 (unsigned long long) nr64(ZCP_RAM_ACC));
4938 return err;
4941 nw64(ZCP_RAM_ACC,
4942 (ZCP_RAM_ACC_READ |
4943 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
4944 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
4946 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4947 1000, 100);
4948 if (err) {
4949 dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
4950 "ZCP_RAM_ACC[%llx]\n", np->dev->name,
4951 (unsigned long long) nr64(ZCP_RAM_ACC));
4952 return err;
4955 data[0] = nr64(ZCP_RAM_DATA0);
4956 data[1] = nr64(ZCP_RAM_DATA1);
4957 data[2] = nr64(ZCP_RAM_DATA2);
4958 data[3] = nr64(ZCP_RAM_DATA3);
4959 data[4] = nr64(ZCP_RAM_DATA4);
4961 return 0;
4964 static void niu_zcp_cfifo_reset(struct niu *np)
4966 u64 val = nr64(RESET_CFIFO);
4968 val |= RESET_CFIFO_RST(np->port);
4969 nw64(RESET_CFIFO, val);
4970 udelay(10);
4972 val &= ~RESET_CFIFO_RST(np->port);
4973 nw64(RESET_CFIFO, val);
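/* Zero every CFIFO entry through the ZCP RAM access port (write, then
 * read back each slot), reset this port's CFIFO, clear the ECC and
 * interrupt status registers and write the full ZCP interrupt mask.
 */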
4976 static int niu_init_zcp(struct niu *np)
4978 u64 data[5], rbuf[5];
4979 int i, max, err;
4981 if (np->parent->plat_type != PLAT_TYPE_NIU) {
4982 if (np->port == 0 || np->port == 1)
4983 max = ATLAS_P0_P1_CFIFO_ENTRIES;
4984 else
4985 max = ATLAS_P2_P3_CFIFO_ENTRIES;
4986 } else
4987 max = NIU_CFIFO_ENTRIES;
4989 data[0] = 0;
4990 data[1] = 0;
4991 data[2] = 0;
4992 data[3] = 0;
4993 data[4] = 0;
4995 for (i = 0; i < max; i++) {
4996 err = niu_zcp_write(np, i, data);
4997 if (err)
4998 return err;
4999 err = niu_zcp_read(np, i, rbuf);
5000 if (err)
5001 return err;
5004 niu_zcp_cfifo_reset(np);
5005 nw64(CFIFO_ECC(np->port), 0);
5006 nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
5007 (void) nr64(ZCP_INT_STAT);
5008 nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
5010 return 0;
5013 static void niu_ipp_write(struct niu *np, int index, u64 *data)
5015 u64 val = nr64_ipp(IPP_CFIG);
5017 nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
5018 nw64_ipp(IPP_DFIFO_WR_PTR, index);
5019 nw64_ipp(IPP_DFIFO_WR0, data[0]);
5020 nw64_ipp(IPP_DFIFO_WR1, data[1]);
5021 nw64_ipp(IPP_DFIFO_WR2, data[2]);
5022 nw64_ipp(IPP_DFIFO_WR3, data[3]);
5023 nw64_ipp(IPP_DFIFO_WR4, data[4]);
5024 nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
5027 static void niu_ipp_read(struct niu *np, int index, u64 *data)
5029 nw64_ipp(IPP_DFIFO_RD_PTR, index);
5030 data[0] = nr64_ipp(IPP_DFIFO_RD0);
5031 data[1] = nr64_ipp(IPP_DFIFO_RD1);
5032 data[2] = nr64_ipp(IPP_DFIFO_RD2);
5033 data[3] = nr64_ipp(IPP_DFIFO_RD3);
5034 data[4] = nr64_ipp(IPP_DFIFO_RD4);
5037 static int niu_ipp_reset(struct niu *np)
5039 return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
5040 1000, 100, "IPP_CFIG");
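/* Zero the IPP DFIFO through its PIO write/read window, soft-reset the
 * IPP, read back its drop, bad-checksum and ECC counters, program the
 * interrupt mask, then enable it with DFIFO ECC, bad-CRC drop,
 * checksumming and the maximum packet length configured.
 */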
5043 static int niu_init_ipp(struct niu *np)
5045 u64 data[5], rbuf[5], val;
5046 int i, max, err;
5048 if (np->parent->plat_type != PLAT_TYPE_NIU) {
5049 if (np->port == 0 || np->port == 1)
5050 max = ATLAS_P0_P1_DFIFO_ENTRIES;
5051 else
5052 max = ATLAS_P2_P3_DFIFO_ENTRIES;
5053 } else
5054 max = NIU_DFIFO_ENTRIES;
5056 data[0] = 0;
5057 data[1] = 0;
5058 data[2] = 0;
5059 data[3] = 0;
5060 data[4] = 0;
5062 for (i = 0; i < max; i++) {
5063 niu_ipp_write(np, i, data);
5064 niu_ipp_read(np, i, rbuf);
5067 (void) nr64_ipp(IPP_INT_STAT);
5068 (void) nr64_ipp(IPP_INT_STAT);
5070 err = niu_ipp_reset(np);
5071 if (err)
5072 return err;
5074 (void) nr64_ipp(IPP_PKT_DIS);
5075 (void) nr64_ipp(IPP_BAD_CS_CNT);
5076 (void) nr64_ipp(IPP_ECC);
5078 (void) nr64_ipp(IPP_INT_STAT);
5080 nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
5082 val = nr64_ipp(IPP_CFIG);
5083 val &= ~IPP_CFIG_IP_MAX_PKT;
5084 val |= (IPP_CFIG_IPP_ENABLE |
5085 IPP_CFIG_DFIFO_ECC_EN |
5086 IPP_CFIG_DROP_BAD_CRC |
5087 IPP_CFIG_CKSUM_EN |
5088 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
5089 nw64_ipp(IPP_CFIG, val);
5091 return 0;
5094 static void niu_handle_led(struct niu *np, int status)
5096 u64 val;
5097 val = nr64_mac(XMAC_CONFIG);
5099 if ((np->flags & NIU_FLAGS_10G) != 0 &&
5100 (np->flags & NIU_FLAGS_FIBER) != 0) {
5101 if (status) {
5102 val |= XMAC_CONFIG_LED_POLARITY;
5103 val &= ~XMAC_CONFIG_FORCE_LED_ON;
5104 } else {
5105 val |= XMAC_CONFIG_FORCE_LED_ON;
5106 val &= ~XMAC_CONFIG_LED_POLARITY;
5110 nw64_mac(XMAC_CONFIG, val);
5113 static void niu_init_xif_xmac(struct niu *np)
5115 struct niu_link_config *lp = &np->link_config;
5116 u64 val;
5118 if (np->flags & NIU_FLAGS_XCVR_SERDES) {
5119 val = nr64(MIF_CONFIG);
5120 val |= MIF_CONFIG_ATCA_GE;
5121 nw64(MIF_CONFIG, val);
5124 val = nr64_mac(XMAC_CONFIG);
5125 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5127 val |= XMAC_CONFIG_TX_OUTPUT_EN;
5129 if (lp->loopback_mode == LOOPBACK_MAC) {
5130 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5131 val |= XMAC_CONFIG_LOOPBACK;
5132 } else {
5133 val &= ~XMAC_CONFIG_LOOPBACK;
5136 if (np->flags & NIU_FLAGS_10G) {
5137 val &= ~XMAC_CONFIG_LFS_DISABLE;
5138 } else {
5139 val |= XMAC_CONFIG_LFS_DISABLE;
5140 if (!(np->flags & NIU_FLAGS_FIBER) &&
5141 !(np->flags & NIU_FLAGS_XCVR_SERDES))
5142 val |= XMAC_CONFIG_1G_PCS_BYPASS;
5143 else
5144 val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
5147 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5149 if (lp->active_speed == SPEED_100)
5150 val |= XMAC_CONFIG_SEL_CLK_25MHZ;
5151 else
5152 val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
5154 nw64_mac(XMAC_CONFIG, val);
5156 val = nr64_mac(XMAC_CONFIG);
5157 val &= ~XMAC_CONFIG_MODE_MASK;
5158 if (np->flags & NIU_FLAGS_10G) {
5159 val |= XMAC_CONFIG_MODE_XGMII;
5160 } else {
5161 if (lp->active_speed == SPEED_100)
5162 val |= XMAC_CONFIG_MODE_MII;
5163 else
5164 val |= XMAC_CONFIG_MODE_GMII;
5167 nw64_mac(XMAC_CONFIG, val);
5170 static void niu_init_xif_bmac(struct niu *np)
5172 struct niu_link_config *lp = &np->link_config;
5173 u64 val;
5175 val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
5177 if (lp->loopback_mode == LOOPBACK_MAC)
5178 val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
5179 else
5180 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
5182 if (lp->active_speed == SPEED_1000)
5183 val |= BMAC_XIF_CONFIG_GMII_MODE;
5184 else
5185 val &= ~BMAC_XIF_CONFIG_GMII_MODE;
5187 val &= ~(BMAC_XIF_CONFIG_LINK_LED |
5188 BMAC_XIF_CONFIG_LED_POLARITY);
5190 if (!(np->flags & NIU_FLAGS_10G) &&
5191 !(np->flags & NIU_FLAGS_FIBER) &&
5192 lp->active_speed == SPEED_100)
5193 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
5194 else
5195 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
5197 nw64_mac(BMAC_XIF_CONFIG, val);
5200 static void niu_init_xif(struct niu *np)
5202 if (np->flags & NIU_FLAGS_XMAC)
5203 niu_init_xif_xmac(np);
5204 else
5205 niu_init_xif_bmac(np);
5208 static void niu_pcs_mii_reset(struct niu *np)
5210 int limit = 1000;
5211 u64 val = nr64_pcs(PCS_MII_CTL);
5212 val |= PCS_MII_CTL_RST;
5213 nw64_pcs(PCS_MII_CTL, val);
5214 while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
5215 udelay(100);
5216 val = nr64_pcs(PCS_MII_CTL);
5220 static void niu_xpcs_reset(struct niu *np)
5222 int limit = 1000;
5223 u64 val = nr64_xpcs(XPCS_CONTROL1);
5224 val |= XPCS_CONTROL1_RESET;
5225 nw64_xpcs(XPCS_CONTROL1, val);
5226 while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
5227 udelay(100);
5228 val = nr64_xpcs(XPCS_CONTROL1);
5232 static int niu_init_pcs(struct niu *np)
5234 struct niu_link_config *lp = &np->link_config;
5235 u64 val;
5237 switch (np->flags & (NIU_FLAGS_10G |
5238 NIU_FLAGS_FIBER |
5239 NIU_FLAGS_XCVR_SERDES)) {
5240 case NIU_FLAGS_FIBER:
5241 /* 1G fiber */
5242 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5243 nw64_pcs(PCS_DPATH_MODE, 0);
5244 niu_pcs_mii_reset(np);
5245 break;
5247 case NIU_FLAGS_10G:
5248 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
5249 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
5250 /* 10G SERDES */
5251 if (!(np->flags & NIU_FLAGS_XMAC))
5252 return -EINVAL;
5254 /* 10G copper or fiber */
5255 val = nr64_mac(XMAC_CONFIG);
5256 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5257 nw64_mac(XMAC_CONFIG, val);
5259 niu_xpcs_reset(np);
5261 val = nr64_xpcs(XPCS_CONTROL1);
5262 if (lp->loopback_mode == LOOPBACK_PHY)
5263 val |= XPCS_CONTROL1_LOOPBACK;
5264 else
5265 val &= ~XPCS_CONTROL1_LOOPBACK;
5266 nw64_xpcs(XPCS_CONTROL1, val);
5268 nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
5269 (void) nr64_xpcs(XPCS_SYMERR_CNT01);
5270 (void) nr64_xpcs(XPCS_SYMERR_CNT23);
5271 break;
5274 case NIU_FLAGS_XCVR_SERDES:
5275 /* 1G SERDES */
5276 niu_pcs_mii_reset(np);
5277 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5278 nw64_pcs(PCS_DPATH_MODE, 0);
5279 break;
5281 case 0:
5282 /* 1G copper */
5283 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
5284 /* 1G RGMII FIBER */
5285 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
5286 niu_pcs_mii_reset(np);
5287 break;
5289 default:
5290 return -EINVAL;
5293 return 0;
5296 static int niu_reset_tx_xmac(struct niu *np)
5298 return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5299 (XTXMAC_SW_RST_REG_RS |
5300 XTXMAC_SW_RST_SOFT_RST),
5301 1000, 100, "XTXMAC_SW_RST");
5304 static int niu_reset_tx_bmac(struct niu *np)
5306 int limit;
5308 nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5309 limit = 1000;
5310 while (--limit >= 0) {
5311 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5312 break;
5313 udelay(100);
5315 if (limit < 0) {
5316 dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
5317 "BTXMAC_SW_RST[%llx]\n",
5318 np->port,
5319 (unsigned long long) nr64_mac(BTXMAC_SW_RST));
5320 return -ENODEV;
5323 return 0;
5326 static int niu_reset_tx_mac(struct niu *np)
5328 if (np->flags & NIU_FLAGS_XMAC)
5329 return niu_reset_tx_xmac(np);
5330 else
5331 return niu_reset_tx_bmac(np);
5334 static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
5336 u64 val;
5338 val = nr64_mac(XMAC_MIN);
5339 val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
5340 XMAC_MIN_RX_MIN_PKT_SIZE);
5341 val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
5342 val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
5343 nw64_mac(XMAC_MIN, val);
5345 nw64_mac(XMAC_MAX, max);
5347 nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
5349 val = nr64_mac(XMAC_IPG);
5350 if (np->flags & NIU_FLAGS_10G) {
5351 val &= ~XMAC_IPG_IPG_XGMII;
5352 val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
5353 } else {
5354 val &= ~XMAC_IPG_IPG_MII_GMII;
5355 val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
5357 nw64_mac(XMAC_IPG, val);
5359 val = nr64_mac(XMAC_CONFIG);
5360 val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
5361 XMAC_CONFIG_STRETCH_MODE |
5362 XMAC_CONFIG_VAR_MIN_IPG_EN |
5363 XMAC_CONFIG_TX_ENABLE);
5364 nw64_mac(XMAC_CONFIG, val);
5366 nw64_mac(TXMAC_FRM_CNT, 0);
5367 nw64_mac(TXMAC_BYTE_CNT, 0);
5370 static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
5372 u64 val;
5374 nw64_mac(BMAC_MIN_FRAME, min);
5375 nw64_mac(BMAC_MAX_FRAME, max);
5377 nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
5378 nw64_mac(BMAC_CTRL_TYPE, 0x8808);
5379 nw64_mac(BMAC_PREAMBLE_SIZE, 7);
5381 val = nr64_mac(BTXMAC_CONFIG);
5382 val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
5383 BTXMAC_CONFIG_ENABLE);
5384 nw64_mac(BTXMAC_CONFIG, val);
5387 static void niu_init_tx_mac(struct niu *np)
5389 u64 min, max;
5391 min = 64;
5392 if (np->dev->mtu > ETH_DATA_LEN)
5393 max = 9216;
5394 else
5395 max = 1522;
5397 /* The XMAC_MIN register only accepts values for TX min which
5398 * have the low 3 bits cleared. */
5400 BUILD_BUG_ON(min & 0x7);
5402 if (np->flags & NIU_FLAGS_XMAC)
5403 niu_init_tx_xmac(np, min, max);
5404 else
5405 niu_init_tx_bmac(np, min, max);
5408 static int niu_reset_rx_xmac(struct niu *np)
5410 int limit;
5412 nw64_mac(XRXMAC_SW_RST,
5413 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5414 limit = 1000;
5415 while (--limit >= 0) {
5416 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5417 XRXMAC_SW_RST_SOFT_RST)))
5418 break;
5419 udelay(100);
5421 if (limit < 0) {
5422 dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
5423 "XRXMAC_SW_RST[%llx]\n",
5424 np->port,
5425 (unsigned long long) nr64_mac(XRXMAC_SW_RST));
5426 return -ENODEV;
5429 return 0;
5432 static int niu_reset_rx_bmac(struct niu *np)
5434 int limit;
5436 nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5437 limit = 1000;
5438 while (--limit >= 0) {
5439 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5440 break;
5441 udelay(100);
5443 if (limit < 0) {
5444 dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
5445 "BRXMAC_SW_RST[%llx]\n",
5446 np->port,
5447 (unsigned long long) nr64_mac(BRXMAC_SW_RST));
5448 return -ENODEV;
5451 return 0;
5454 static int niu_reset_rx_mac(struct niu *np)
5456 if (np->flags & NIU_FLAGS_XMAC)
5457 return niu_reset_rx_xmac(np);
5458 else
5459 return niu_reset_rx_bmac(np);
5462 static void niu_init_rx_xmac(struct niu *np)
5464 struct niu_parent *parent = np->parent;
5465 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5466 int first_rdc_table = tp->first_table_num;
5467 unsigned long i;
5468 u64 val;
5470 nw64_mac(XMAC_ADD_FILT0, 0);
5471 nw64_mac(XMAC_ADD_FILT1, 0);
5472 nw64_mac(XMAC_ADD_FILT2, 0);
5473 nw64_mac(XMAC_ADD_FILT12_MASK, 0);
5474 nw64_mac(XMAC_ADD_FILT00_MASK, 0);
5475 for (i = 0; i < MAC_NUM_HASH; i++)
5476 nw64_mac(XMAC_HASH_TBL(i), 0);
5477 nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
5478 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5479 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5481 val = nr64_mac(XMAC_CONFIG);
5482 val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
5483 XMAC_CONFIG_PROMISCUOUS |
5484 XMAC_CONFIG_PROMISC_GROUP |
5485 XMAC_CONFIG_ERR_CHK_DIS |
5486 XMAC_CONFIG_RX_CRC_CHK_DIS |
5487 XMAC_CONFIG_RESERVED_MULTICAST |
5488 XMAC_CONFIG_RX_CODEV_CHK_DIS |
5489 XMAC_CONFIG_ADDR_FILTER_EN |
5490 XMAC_CONFIG_RCV_PAUSE_ENABLE |
5491 XMAC_CONFIG_STRIP_CRC |
5492 XMAC_CONFIG_PASS_FLOW_CTRL |
5493 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
5494 val |= (XMAC_CONFIG_HASH_FILTER_EN);
5495 nw64_mac(XMAC_CONFIG, val);
5497 nw64_mac(RXMAC_BT_CNT, 0);
5498 nw64_mac(RXMAC_BC_FRM_CNT, 0);
5499 nw64_mac(RXMAC_MC_FRM_CNT, 0);
5500 nw64_mac(RXMAC_FRAG_CNT, 0);
5501 nw64_mac(RXMAC_HIST_CNT1, 0);
5502 nw64_mac(RXMAC_HIST_CNT2, 0);
5503 nw64_mac(RXMAC_HIST_CNT3, 0);
5504 nw64_mac(RXMAC_HIST_CNT4, 0);
5505 nw64_mac(RXMAC_HIST_CNT5, 0);
5506 nw64_mac(RXMAC_HIST_CNT6, 0);
5507 nw64_mac(RXMAC_HIST_CNT7, 0);
5508 nw64_mac(RXMAC_MPSZER_CNT, 0);
5509 nw64_mac(RXMAC_CRC_ER_CNT, 0);
5510 nw64_mac(RXMAC_CD_VIO_CNT, 0);
5511 nw64_mac(LINK_FAULT_CNT, 0);
5514 static void niu_init_rx_bmac(struct niu *np)
5516 struct niu_parent *parent = np->parent;
5517 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5518 int first_rdc_table = tp->first_table_num;
5519 unsigned long i;
5520 u64 val;
5522 nw64_mac(BMAC_ADD_FILT0, 0);
5523 nw64_mac(BMAC_ADD_FILT1, 0);
5524 nw64_mac(BMAC_ADD_FILT2, 0);
5525 nw64_mac(BMAC_ADD_FILT12_MASK, 0);
5526 nw64_mac(BMAC_ADD_FILT00_MASK, 0);
5527 for (i = 0; i < MAC_NUM_HASH; i++)
5528 nw64_mac(BMAC_HASH_TBL(i), 0);
5529 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5530 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5531 nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
5533 val = nr64_mac(BRXMAC_CONFIG);
5534 val &= ~(BRXMAC_CONFIG_ENABLE |
5535 BRXMAC_CONFIG_STRIP_PAD |
5536 BRXMAC_CONFIG_STRIP_FCS |
5537 BRXMAC_CONFIG_PROMISC |
5538 BRXMAC_CONFIG_PROMISC_GRP |
5539 BRXMAC_CONFIG_ADDR_FILT_EN |
5540 BRXMAC_CONFIG_DISCARD_DIS);
5541 val |= (BRXMAC_CONFIG_HASH_FILT_EN);
5542 nw64_mac(BRXMAC_CONFIG, val);
5544 val = nr64_mac(BMAC_ADDR_CMPEN);
5545 val |= BMAC_ADDR_CMPEN_EN0;
5546 nw64_mac(BMAC_ADDR_CMPEN, val);
5549 static void niu_init_rx_mac(struct niu *np)
5551 niu_set_primary_mac(np, np->dev->dev_addr);
5553 if (np->flags & NIU_FLAGS_XMAC)
5554 niu_init_rx_xmac(np);
5555 else
5556 niu_init_rx_bmac(np);
5559 static void niu_enable_tx_xmac(struct niu *np, int on)
5561 u64 val = nr64_mac(XMAC_CONFIG);
5563 if (on)
5564 val |= XMAC_CONFIG_TX_ENABLE;
5565 else
5566 val &= ~XMAC_CONFIG_TX_ENABLE;
5567 nw64_mac(XMAC_CONFIG, val);
5570 static void niu_enable_tx_bmac(struct niu *np, int on)
5572 u64 val = nr64_mac(BTXMAC_CONFIG);
5574 if (on)
5575 val |= BTXMAC_CONFIG_ENABLE;
5576 else
5577 val &= ~BTXMAC_CONFIG_ENABLE;
5578 nw64_mac(BTXMAC_CONFIG, val);
5581 static void niu_enable_tx_mac(struct niu *np, int on)
5583 if (np->flags & NIU_FLAGS_XMAC)
5584 niu_enable_tx_xmac(np, on);
5585 else
5586 niu_enable_tx_bmac(np, on);
5589 static void niu_enable_rx_xmac(struct niu *np, int on)
5591 u64 val = nr64_mac(XMAC_CONFIG);
5593 val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5594 XMAC_CONFIG_PROMISCUOUS);
5596 if (np->flags & NIU_FLAGS_MCAST)
5597 val |= XMAC_CONFIG_HASH_FILTER_EN;
5598 if (np->flags & NIU_FLAGS_PROMISC)
5599 val |= XMAC_CONFIG_PROMISCUOUS;
5601 if (on)
5602 val |= XMAC_CONFIG_RX_MAC_ENABLE;
5603 else
5604 val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5605 nw64_mac(XMAC_CONFIG, val);
5608 static void niu_enable_rx_bmac(struct niu *np, int on)
5610 u64 val = nr64_mac(BRXMAC_CONFIG);
5612 val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5613 BRXMAC_CONFIG_PROMISC);
5615 if (np->flags & NIU_FLAGS_MCAST)
5616 val |= BRXMAC_CONFIG_HASH_FILT_EN;
5617 if (np->flags & NIU_FLAGS_PROMISC)
5618 val |= BRXMAC_CONFIG_PROMISC;
5620 if (on)
5621 val |= BRXMAC_CONFIG_ENABLE;
5622 else
5623 val &= ~BRXMAC_CONFIG_ENABLE;
5624 nw64_mac(BRXMAC_CONFIG, val);
5627 static void niu_enable_rx_mac(struct niu *np, int on)
5629 if (np->flags & NIU_FLAGS_XMAC)
5630 niu_enable_rx_xmac(np, on);
5631 else
5632 niu_enable_rx_bmac(np, on);
5635 static int niu_init_mac(struct niu *np)
5637 int err;
5639 niu_init_xif(np);
5640 err = niu_init_pcs(np);
5641 if (err)
5642 return err;
5644 err = niu_reset_tx_mac(np);
5645 if (err)
5646 return err;
5647 niu_init_tx_mac(np);
5648 err = niu_reset_rx_mac(np);
5649 if (err)
5650 return err;
5651 niu_init_rx_mac(np);
5653 /* This looks hokey, but the RX MAC reset we just did will
5654 * undo some of the state we set up in niu_init_tx_mac(), so we
5655 * have to call it again. In particular, the RX MAC reset will
5656 * set the XMAC_MAX register back to its default value. */
5658 niu_init_tx_mac(np);
5659 niu_enable_tx_mac(np, 1);
5661 niu_enable_rx_mac(np, 1);
5663 return 0;
5666 static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5668 (void) niu_tx_channel_stop(np, rp->tx_channel);
5671 static void niu_stop_tx_channels(struct niu *np)
5673 int i;
5675 for (i = 0; i < np->num_tx_rings; i++) {
5676 struct tx_ring_info *rp = &np->tx_rings[i];
5678 niu_stop_one_tx_channel(np, rp);
5682 static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5684 (void) niu_tx_channel_reset(np, rp->tx_channel);
5687 static void niu_reset_tx_channels(struct niu *np)
5689 int i;
5691 for (i = 0; i < np->num_tx_rings; i++) {
5692 struct tx_ring_info *rp = &np->tx_rings[i];
5694 niu_reset_one_tx_channel(np, rp);
5698 static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5700 (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5703 static void niu_stop_rx_channels(struct niu *np)
5705 int i;
5707 for (i = 0; i < np->num_rx_rings; i++) {
5708 struct rx_ring_info *rp = &np->rx_rings[i];
5710 niu_stop_one_rx_channel(np, rp);
5714 static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5716 int channel = rp->rx_channel;
5718 (void) niu_rx_channel_reset(np, channel);
5719 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
5720 nw64(RX_DMA_CTL_STAT(channel), 0);
5721 (void) niu_enable_rx_channel(np, channel, 0);
5724 static void niu_reset_rx_channels(struct niu *np)
5726 int i;
5728 for (i = 0; i < np->num_rx_rings; i++) {
5729 struct rx_ring_info *rp = &np->rx_rings[i];
5731 niu_reset_one_rx_channel(np, rp);
5735 static void niu_disable_ipp(struct niu *np)
5737 u64 rd, wr, val;
5738 int limit;
5740 rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5741 wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5742 limit = 100;
5743 while (--limit >= 0 && (rd != wr)) {
5744 rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5745 wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5747 if (limit < 0 &&
5748 (rd != 0 && wr != 1)) {
5749 dev_err(np->device, PFX "%s: IPP would not quiesce, "
5750 "rd_ptr[%llx] wr_ptr[%llx]\n",
5751 np->dev->name,
5752 (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
5753 (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
5756 val = nr64_ipp(IPP_CFIG);
5757 val &= ~(IPP_CFIG_IPP_ENABLE |
5758 IPP_CFIG_DFIFO_ECC_EN |
5759 IPP_CFIG_DROP_BAD_CRC |
5760 IPP_CFIG_CKSUM_EN);
5761 nw64_ipp(IPP_CFIG, val);
5763 (void) niu_ipp_reset(np);
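/* Full hardware bring-up in dependency order: TXC, TX channels, RX
 * channels, classifier, ZCP, IPP and finally the MAC, unwinding the
 * already-initialized blocks if any later step fails.
 */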
5766 static int niu_init_hw(struct niu *np)
5768 int i, err;
5770 niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
5771 niu_txc_enable_port(np, 1);
5772 niu_txc_port_dma_enable(np, 1);
5773 niu_txc_set_imask(np, 0);
5775 niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
5776 for (i = 0; i < np->num_tx_rings; i++) {
5777 struct tx_ring_info *rp = &np->tx_rings[i];
5779 err = niu_init_one_tx_channel(np, rp);
5780 if (err)
5781 return err;
5784 niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
5785 err = niu_init_rx_channels(np);
5786 if (err)
5787 goto out_uninit_tx_channels;
5789 niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
5790 err = niu_init_classifier_hw(np);
5791 if (err)
5792 goto out_uninit_rx_channels;
5794 niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
5795 err = niu_init_zcp(np);
5796 if (err)
5797 goto out_uninit_rx_channels;
5799 niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
5800 err = niu_init_ipp(np);
5801 if (err)
5802 goto out_uninit_rx_channels;
5804 niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
5805 err = niu_init_mac(np);
5806 if (err)
5807 goto out_uninit_ipp;
5809 return 0;
5811 out_uninit_ipp:
5812 niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
5813 niu_disable_ipp(np);
5815 out_uninit_rx_channels:
5816 niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
5817 niu_stop_rx_channels(np);
5818 niu_reset_rx_channels(np);
5820 out_uninit_tx_channels:
5821 niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
5822 niu_stop_tx_channels(np);
5823 niu_reset_tx_channels(np);
5825 return err;
5828 static void niu_stop_hw(struct niu *np)
5830 niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
5831 niu_enable_interrupts(np, 0);
5833 niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
5834 niu_enable_rx_mac(np, 0);
5836 niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
5837 niu_disable_ipp(np);
5839 niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
5840 niu_stop_tx_channels(np);
5842 niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
5843 niu_stop_rx_channels(np);
5845 niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
5846 niu_reset_tx_channels(np);
5848 niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
5849 niu_reset_rx_channels(np);
5852 static int niu_request_irq(struct niu *np)
5854 int i, j, err;
5856 err = 0;
5857 for (i = 0; i < np->num_ldg; i++) {
5858 struct niu_ldg *lp = &np->ldg[i];
5860 err = request_irq(lp->irq, niu_interrupt,
5861 IRQF_SHARED | IRQF_SAMPLE_RANDOM,
5862 np->dev->name, lp);
5863 if (err)
5864 goto out_free_irqs;
5868 return 0;
5870 out_free_irqs:
5871 for (j = 0; j < i; j++) {
5872 struct niu_ldg *lp = &np->ldg[j];
5874 free_irq(lp->irq, lp);
5876 return err;
5879 static void niu_free_irq(struct niu *np)
5881 int i;
5883 for (i = 0; i < np->num_ldg; i++) {
5884 struct niu_ldg *lp = &np->ldg[i];
5886 free_irq(lp->irq, lp);
5890 static void niu_enable_napi(struct niu *np)
5892 int i;
5894 for (i = 0; i < np->num_ldg; i++)
5895 napi_enable(&np->ldg[i].napi);
5898 static void niu_disable_napi(struct niu *np)
5900 int i;
5902 for (i = 0; i < np->num_ldg; i++)
5903 napi_disable(&np->ldg[i].napi);
5906 static int niu_open(struct net_device *dev)
5908 struct niu *np = netdev_priv(dev);
5909 int err;
5911 netif_carrier_off(dev);
5913 err = niu_alloc_channels(np);
5914 if (err)
5915 goto out_err;
5917 err = niu_enable_interrupts(np, 0);
5918 if (err)
5919 goto out_free_channels;
5921 err = niu_request_irq(np);
5922 if (err)
5923 goto out_free_channels;
5925 niu_enable_napi(np);
5927 spin_lock_irq(&np->lock);
5929 err = niu_init_hw(np);
5930 if (!err) {
5931 init_timer(&np->timer);
5932 np->timer.expires = jiffies + HZ;
5933 np->timer.data = (unsigned long) np;
5934 np->timer.function = niu_timer;
5936 err = niu_enable_interrupts(np, 1);
5937 if (err)
5938 niu_stop_hw(np);
5941 spin_unlock_irq(&np->lock);
5943 if (err) {
5944 niu_disable_napi(np);
5945 goto out_free_irq;
5948 netif_tx_start_all_queues(dev);
5950 if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
5951 netif_carrier_on(dev);
5953 add_timer(&np->timer);
5955 return 0;
5957 out_free_irq:
5958 niu_free_irq(np);
5960 out_free_channels:
5961 niu_free_channels(np);
5963 out_err:
5964 return err;
5967 static void niu_full_shutdown(struct niu *np, struct net_device *dev)
5969 cancel_work_sync(&np->reset_task);
5971 niu_disable_napi(np);
5972 netif_tx_stop_all_queues(dev);
5974 del_timer_sync(&np->timer);
5976 spin_lock_irq(&np->lock);
5978 niu_stop_hw(np);
5980 spin_unlock_irq(&np->lock);
5983 static int niu_close(struct net_device *dev)
5985 struct niu *np = netdev_priv(dev);
5987 niu_full_shutdown(np, dev);
5989 niu_free_irq(np);
5991 niu_free_channels(np);
5993 niu_handle_led(np, 0);
5995 return 0;
5998 static void niu_sync_xmac_stats(struct niu *np)
6000 struct niu_xmac_stats *mp = &np->mac_stats.xmac;
6002 mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
6003 mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
6005 mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
6006 mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
6007 mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
6008 mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
6009 mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
6010 mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
6011 mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
6012 mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
6013 mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
6014 mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
6015 mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
6016 mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
6017 mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
6018 mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
6019 mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
6020 mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
6023 static void niu_sync_bmac_stats(struct niu *np)
6025 struct niu_bmac_stats *mp = &np->mac_stats.bmac;
6027 mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
6028 mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
6030 mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
6031 mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
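/* XXX rx_crc_errors below re-reads the alignment-error counter; a
 * dedicated BMAC CRC-error counter register is presumably the intended
 * source.
 */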
6032 mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6033 mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
6036 static void niu_sync_mac_stats(struct niu *np)
6038 if (np->flags & NIU_FLAGS_XMAC)
6039 niu_sync_xmac_stats(np);
6040 else
6041 niu_sync_bmac_stats(np);
6044 static void niu_get_rx_stats(struct niu *np)
6046 unsigned long pkts, dropped, errors, bytes;
6047 int i;
6049 pkts = dropped = errors = bytes = 0;
6050 for (i = 0; i < np->num_rx_rings; i++) {
6051 struct rx_ring_info *rp = &np->rx_rings[i];
6053 pkts += rp->rx_packets;
6054 bytes += rp->rx_bytes;
6055 dropped += rp->rx_dropped;
6056 errors += rp->rx_errors;
6058 np->net_stats.rx_packets = pkts;
6059 np->net_stats.rx_bytes = bytes;
6060 np->net_stats.rx_dropped = dropped;
6061 np->net_stats.rx_errors = errors;
6064 static void niu_get_tx_stats(struct niu *np)
6066 unsigned long pkts, errors, bytes;
6067 int i;
6069 pkts = errors = bytes = 0;
6070 for (i = 0; i < np->num_tx_rings; i++) {
6071 struct tx_ring_info *rp = &np->tx_rings[i];
6073 pkts += rp->tx_packets;
6074 bytes += rp->tx_bytes;
6075 errors += rp->tx_errors;
6077 np->net_stats.tx_packets = pkts;
6078 np->net_stats.tx_bytes = bytes;
6079 np->net_stats.tx_errors = errors;
6082 static struct net_device_stats *niu_get_stats(struct net_device *dev)
6084 struct niu *np = netdev_priv(dev);
6086 niu_get_rx_stats(np);
6087 niu_get_tx_stats(np);
6089 return &np->net_stats;
6092 static void niu_load_hash_xmac(struct niu *np, u16 *hash)
6094 int i;
6096 for (i = 0; i < 16; i++)
6097 nw64_mac(XMAC_HASH_TBL(i), hash[i]);
6100 static void niu_load_hash_bmac(struct niu *np, u16 *hash)
6102 int i;
6104 for (i = 0; i < 16; i++)
6105 nw64_mac(BMAC_HASH_TBL(i), hash[i]);
6108 static void niu_load_hash(struct niu *np, u16 *hash)
6110 if (np->flags & NIU_FLAGS_XMAC)
6111 niu_load_hash_xmac(np, hash);
6112 else
6113 niu_load_hash_bmac(np, hash);
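/* Rebuild the RX filtering state with the RX MAC disabled: derive the
 * promiscuous/multicast flags from dev->flags, program (or disable)
 * the alternate unicast MAC slots, and load the 16-word multicast hash
 * table computed from the CRC of each multicast address.
 */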
6116 static void niu_set_rx_mode(struct net_device *dev)
6118 struct niu *np = netdev_priv(dev);
6119 int i, alt_cnt, err;
6120 struct dev_addr_list *addr;
6121 unsigned long flags;
6122 u16 hash[16] = { 0, };
6124 spin_lock_irqsave(&np->lock, flags);
6125 niu_enable_rx_mac(np, 0);
6127 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6128 if (dev->flags & IFF_PROMISC)
6129 np->flags |= NIU_FLAGS_PROMISC;
6130 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
6131 np->flags |= NIU_FLAGS_MCAST;
6133 alt_cnt = dev->uc_count;
6134 if (alt_cnt > niu_num_alt_addr(np)) {
6135 alt_cnt = 0;
6136 np->flags |= NIU_FLAGS_PROMISC;
6139 if (alt_cnt) {
6140 int index = 0;
6142 for (addr = dev->uc_list; addr; addr = addr->next) {
6143 err = niu_set_alt_mac(np, index,
6144 addr->da_addr);
6145 if (err)
6146 printk(KERN_WARNING PFX "%s: Error %d "
6147 "adding alt mac %d\n",
6148 dev->name, err, index);
6149 err = niu_enable_alt_mac(np, index, 1);
6150 if (err)
6151 printk(KERN_WARNING PFX "%s: Error %d "
6152 "enabling alt mac %d\n",
6153 dev->name, err, index);
6155 index++;
6157 } else {
6158 int alt_start;
6159 if (np->flags & NIU_FLAGS_XMAC)
6160 alt_start = 0;
6161 else
6162 alt_start = 1;
6163 for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6164 err = niu_enable_alt_mac(np, i, 0);
6165 if (err)
6166 printk(KERN_WARNING PFX "%s: Error %d "
6167 "disabling alt mac %d\n",
6168 dev->name, err, i);
6171 if (dev->flags & IFF_ALLMULTI) {
6172 for (i = 0; i < 16; i++)
6173 hash[i] = 0xffff;
6174 } else if (dev->mc_count > 0) {
6175 for (addr = dev->mc_list; addr; addr = addr->next) {
6176 u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
6178 crc >>= 24;
6179 hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
6183 if (np->flags & NIU_FLAGS_MCAST)
6184 niu_load_hash(np, hash);
6186 niu_enable_rx_mac(np, 1);
6187 spin_unlock_irqrestore(&np->lock, flags);
6190 static int niu_set_mac_addr(struct net_device *dev, void *p)
6192 struct niu *np = netdev_priv(dev);
6193 struct sockaddr *addr = p;
6194 unsigned long flags;
6196 if (!is_valid_ether_addr(addr->sa_data))
6197 return -EINVAL;
6199 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
6201 if (!netif_running(dev))
6202 return 0;
6204 spin_lock_irqsave(&np->lock, flags);
6205 niu_enable_rx_mac(np, 0);
6206 niu_set_primary_mac(np, dev->dev_addr);
6207 niu_enable_rx_mac(np, 1);
6208 spin_unlock_irqrestore(&np->lock, flags);
6210 return 0;
6213 static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6215 return -EOPNOTSUPP;
6218 static void niu_netif_stop(struct niu *np)
6220 np->dev->trans_start = jiffies; /* prevent tx timeout */
6222 niu_disable_napi(np);
6224 netif_tx_disable(np->dev);
6227 static void niu_netif_start(struct niu *np)
6229 /* NOTE: unconditional netif_wake_queue is only appropriate
6230 * so long as all callers are assured to have free tx slots
6231 * (such as after niu_init_hw). */
6233 netif_tx_wake_all_queues(np->dev);
6235 niu_enable_napi(np);
6237 niu_enable_interrupts(np, 1);
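/* Rebuild ring state after a reset without freeing RX pages: walk the
 * rxhash chains and re-emit an RBR descriptor for every page still
 * held, refill the remaining slots, release every pending TX skb, and
 * reset the producer/consumer indices of both ring types.
 */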
6240 static void niu_reset_buffers(struct niu *np)
6242 int i, j, k, err;
6244 if (np->rx_rings) {
6245 for (i = 0; i < np->num_rx_rings; i++) {
6246 struct rx_ring_info *rp = &np->rx_rings[i];
6248 for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
6249 struct page *page;
6251 page = rp->rxhash[j];
6252 while (page) {
6253 struct page *next =
6254 (struct page *) page->mapping;
6255 u64 base = page->index;
6256 base = base >> RBR_DESCR_ADDR_SHIFT;
6257 rp->rbr[k++] = cpu_to_le32(base);
6258 page = next;
6261 for (; k < MAX_RBR_RING_SIZE; k++) {
6262 err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6263 if (unlikely(err))
6264 break;
6267 rp->rbr_index = rp->rbr_table_size - 1;
6268 rp->rcr_index = 0;
6269 rp->rbr_pending = 0;
6270 rp->rbr_refill_pending = 0;
6273 if (np->tx_rings) {
6274 for (i = 0; i < np->num_tx_rings; i++) {
6275 struct tx_ring_info *rp = &np->tx_rings[i];
6277 for (j = 0; j < MAX_TX_RING_SIZE; j++) {
6278 if (rp->tx_buffs[j].skb)
6279 (void) release_tx_packet(np, rp, j);
6282 rp->pending = MAX_TX_RING_SIZE;
6283 rp->prod = 0;
6284 rp->cons = 0;
6285 rp->wrap_bit = 0;
6290 static void niu_reset_task(struct work_struct *work)
6292 struct niu *np = container_of(work, struct niu, reset_task);
6293 unsigned long flags;
6294 int err;
6296 spin_lock_irqsave(&np->lock, flags);
6297 if (!netif_running(np->dev)) {
6298 spin_unlock_irqrestore(&np->lock, flags);
6299 return;
6302 spin_unlock_irqrestore(&np->lock, flags);
6304 del_timer_sync(&np->timer);
6306 niu_netif_stop(np);
6308 spin_lock_irqsave(&np->lock, flags);
6310 niu_stop_hw(np);
6312 spin_unlock_irqrestore(&np->lock, flags);
6314 niu_reset_buffers(np);
6316 spin_lock_irqsave(&np->lock, flags);
6318 err = niu_init_hw(np);
6319 if (!err) {
6320 np->timer.expires = jiffies + HZ;
6321 add_timer(&np->timer);
6322 niu_netif_start(np);
6325 spin_unlock_irqrestore(&np->lock, flags);
6328 static void niu_tx_timeout(struct net_device *dev)
6330 struct niu *np = netdev_priv(dev);
6332 dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
6333 dev->name);
6335 schedule_work(&np->reset_task);
6338 static void niu_set_txd(struct tx_ring_info *rp, int index,
6339 u64 mapping, u64 len, u64 mark,
6340 u64 n_frags)
6342 __le64 *desc = &rp->descr[index];
6344 *desc = cpu_to_le64(mark |
6345 (n_frags << TX_DESC_NUM_PTR_SHIFT) |
6346 (len << TX_DESC_TR_LEN_SHIFT) |
6347 (mapping & TX_DESC_SAD));
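/* Assemble the 64-bit flags word of the driver-prepended TX packet
 * header (struct tx_pkt_hdr): pad length, packet length, L3 header
 * offset and IHL, LLC/VLAN/IPv6 indicators, and for CHECKSUM_PARTIAL
 * skbs the L4 checksum type plus start/stuff offsets in 16-bit units.
 */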
6350 static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6351 u64 pad_bytes, u64 len)
6353 u16 eth_proto, eth_proto_inner;
6354 u64 csum_bits, l3off, ihl, ret;
6355 u8 ip_proto;
6356 int ipv6;
6358 eth_proto = be16_to_cpu(ehdr->h_proto);
6359 eth_proto_inner = eth_proto;
6360 if (eth_proto == ETH_P_8021Q) {
6361 struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
6362 __be16 val = vp->h_vlan_encapsulated_proto;
6364 eth_proto_inner = be16_to_cpu(val);
6367 ipv6 = ihl = 0;
6368 switch (skb->protocol) {
6369 case __constant_htons(ETH_P_IP):
6370 ip_proto = ip_hdr(skb)->protocol;
6371 ihl = ip_hdr(skb)->ihl;
6372 break;
6373 case __constant_htons(ETH_P_IPV6):
6374 ip_proto = ipv6_hdr(skb)->nexthdr;
6375 ihl = (40 >> 2);
6376 ipv6 = 1;
6377 break;
6378 default:
6379 ip_proto = ihl = 0;
6380 break;
6383 csum_bits = TXHDR_CSUM_NONE;
6384 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6385 u64 start, stuff;
6387 csum_bits = (ip_proto == IPPROTO_TCP ?
6388 TXHDR_CSUM_TCP :
6389 (ip_proto == IPPROTO_UDP ?
6390 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
6392 start = skb_transport_offset(skb) -
6393 (pad_bytes + sizeof(struct tx_pkt_hdr));
6394 stuff = start + skb->csum_offset;
6396 csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
6397 csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
6400 l3off = skb_network_offset(skb) -
6401 (pad_bytes + sizeof(struct tx_pkt_hdr));
6403 ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
6404 (len << TXHDR_LEN_SHIFT) |
6405 ((l3off / 2) << TXHDR_L3START_SHIFT) |
6406 (ihl << TXHDR_IHL_SHIFT) |
6407 ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
6408 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6409 (ipv6 ? TXHDR_IP_VER : 0) |
6410 csum_bits);
6412 return ret;
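/* Transmit path: pick the TX ring from the skb's queue mapping, pad
 * short frames to ETH_ZLEN, make enough headroom to prepend the
 * tx_pkt_hdr, DMA-map the head and every fragment, emit one descriptor
 * per MAX_TX_DESC_LEN chunk, then kick TX_RING_KICK with the new
 * producer index and wrap bit.
 */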
6415 static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
6417 struct niu *np = netdev_priv(dev);
6418 unsigned long align, headroom;
6419 struct netdev_queue *txq;
6420 struct tx_ring_info *rp;
6421 struct tx_pkt_hdr *tp;
6422 unsigned int len, nfg;
6423 struct ethhdr *ehdr;
6424 int prod, i, tlen;
6425 u64 mapping, mrk;
6427 i = skb_get_queue_mapping(skb);
6428 rp = &np->tx_rings[i];
6429 txq = netdev_get_tx_queue(dev, i);
6431 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6432 netif_tx_stop_queue(txq);
6433 dev_err(np->device, PFX "%s: BUG! Tx ring full when "
6434 "queue awake!\n", dev->name);
6435 rp->tx_errors++;
6436 return NETDEV_TX_BUSY;
6439 if (skb->len < ETH_ZLEN) {
6440 unsigned int pad_bytes = ETH_ZLEN - skb->len;
6442 if (skb_pad(skb, pad_bytes))
6443 goto out;
6444 skb_put(skb, pad_bytes);
6447 len = sizeof(struct tx_pkt_hdr) + 15;
6448 if (skb_headroom(skb) < len) {
6449 struct sk_buff *skb_new;
6451 skb_new = skb_realloc_headroom(skb, len);
6452 if (!skb_new) {
6453 rp->tx_errors++;
6454 goto out_drop;
6456 kfree_skb(skb);
6457 skb = skb_new;
6458 } else
6459 skb_orphan(skb);
6461 align = ((unsigned long) skb->data & (16 - 1));
6462 headroom = align + sizeof(struct tx_pkt_hdr);
6464 ehdr = (struct ethhdr *) skb->data;
6465 tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
6467 len = skb->len - sizeof(struct tx_pkt_hdr);
6468 tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
6469 tp->resv = 0;
6471 len = skb_headlen(skb);
6472 mapping = np->ops->map_single(np->device, skb->data,
6473 len, DMA_TO_DEVICE);
6475 prod = rp->prod;
6477 rp->tx_buffs[prod].skb = skb;
6478 rp->tx_buffs[prod].mapping = mapping;
6480 mrk = TX_DESC_SOP;
6481 if (++rp->mark_counter == rp->mark_freq) {
6482 rp->mark_counter = 0;
6483 mrk |= TX_DESC_MARK;
6484 rp->mark_pending++;
6487 tlen = len;
6488 nfg = skb_shinfo(skb)->nr_frags;
6489 while (tlen > 0) {
6490 tlen -= MAX_TX_DESC_LEN;
6491 nfg++;
6494 while (len > 0) {
6495 unsigned int this_len = len;
6497 if (this_len > MAX_TX_DESC_LEN)
6498 this_len = MAX_TX_DESC_LEN;
6500 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6501 mrk = nfg = 0;
6503 prod = NEXT_TX(rp, prod);
6504 mapping += this_len;
6505 len -= this_len;
6508 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6509 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6511 len = frag->size;
6512 mapping = np->ops->map_page(np->device, frag->page,
6513 frag->page_offset, len,
6514 DMA_TO_DEVICE);
6516 rp->tx_buffs[prod].skb = NULL;
6517 rp->tx_buffs[prod].mapping = mapping;
6519 niu_set_txd(rp, prod, mapping, len, 0, 0);
6521 prod = NEXT_TX(rp, prod);
6524 if (prod < rp->prod)
6525 rp->wrap_bit ^= TX_RING_KICK_WRAP;
6526 rp->prod = prod;
6528 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
6530 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
6531 netif_tx_stop_queue(txq);
6532 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
6533 netif_tx_wake_queue(txq);
6536 dev->trans_start = jiffies;
6538 out:
6539 return NETDEV_TX_OK;
6541 out_drop:
6542 rp->tx_errors++;
6543 kfree_skb(skb);
6544 goto out;
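/* An MTU change only matters to the hardware when it crosses the
 * jumbo-frame boundary; in that case the device is fully shut down,
 * the channels are reallocated, and the hardware is re-initialized
 * much as in niu_open().
 */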
6547 static int niu_change_mtu(struct net_device *dev, int new_mtu)
6549 struct niu *np = netdev_priv(dev);
6550 int err, orig_jumbo, new_jumbo;
6552 if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
6553 return -EINVAL;
6555 orig_jumbo = (dev->mtu > ETH_DATA_LEN);
6556 new_jumbo = (new_mtu > ETH_DATA_LEN);
6558 dev->mtu = new_mtu;
6560 if (!netif_running(dev) ||
6561 (orig_jumbo == new_jumbo))
6562 return 0;
6564 niu_full_shutdown(np, dev);
6566 niu_free_channels(np);
6568 niu_enable_napi(np);
6570 err = niu_alloc_channels(np);
6571 if (err)
6572 return err;
6574 spin_lock_irq(&np->lock);
6576 err = niu_init_hw(np);
6577 if (!err) {
6578 init_timer(&np->timer);
6579 np->timer.expires = jiffies + HZ;
6580 np->timer.data = (unsigned long) np;
6581 np->timer.function = niu_timer;
6583 err = niu_enable_interrupts(np, 1);
6584 if (err)
6585 niu_stop_hw(np);
6588 spin_unlock_irq(&np->lock);
6590 if (!err) {
6591 netif_tx_start_all_queues(dev);
6592 if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6593 netif_carrier_on(dev);
6595 add_timer(&np->timer);
6598 return err;
6601 static void niu_get_drvinfo(struct net_device *dev,
6602 struct ethtool_drvinfo *info)
6604 struct niu *np = netdev_priv(dev);
6605 struct niu_vpd *vpd = &np->vpd;
6607 strcpy(info->driver, DRV_MODULE_NAME);
6608 strcpy(info->version, DRV_MODULE_VERSION);
6609 sprintf(info->fw_version, "%d.%d",
6610 vpd->fcode_major, vpd->fcode_minor);
6611 if (np->parent->plat_type != PLAT_TYPE_NIU)
6612 strcpy(info->bus_info, pci_name(np->pdev));
6615 static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6617 struct niu *np = netdev_priv(dev);
6618 struct niu_link_config *lp;
6620 lp = &np->link_config;
6622 memset(cmd, 0, sizeof(*cmd));
6623 cmd->phy_address = np->phy_addr;
6624 cmd->supported = lp->supported;
6625 cmd->advertising = lp->advertising;
6626 cmd->autoneg = lp->autoneg;
6627 cmd->speed = lp->active_speed;
6628 cmd->duplex = lp->active_duplex;
6630 return 0;
6633 static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6635 return -EINVAL;
6638 static u32 niu_get_msglevel(struct net_device *dev)
6640 struct niu *np = netdev_priv(dev);
6641 return np->msg_enable;
6644 static void niu_set_msglevel(struct net_device *dev, u32 value)
6646 struct niu *np = netdev_priv(dev);
6647 np->msg_enable = value;
6650 static int niu_get_eeprom_len(struct net_device *dev)
6652 struct niu *np = netdev_priv(dev);
6654 return np->eeprom_len;
6657 static int niu_get_eeprom(struct net_device *dev,
6658 struct ethtool_eeprom *eeprom, u8 *data)
6660 struct niu *np = netdev_priv(dev);
6661 u32 offset, len, val;
6663 offset = eeprom->offset;
6664 len = eeprom->len;
6666 if (offset + len < offset)
6667 return -EINVAL;
6668 if (offset >= np->eeprom_len)
6669 return -EINVAL;
6670 if (offset + len > np->eeprom_len)
6671 len = eeprom->len = np->eeprom_len - offset;
6673 if (offset & 3) {
6674 u32 b_offset, b_count;
6676 b_offset = offset & 3;
6677 b_count = 4 - b_offset;
6678 if (b_count > len)
6679 b_count = len;
6681 val = nr64(ESPC_NCR((offset - b_offset) / 4));
6682 memcpy(data, ((char *)&val) + b_offset, b_count);
6683 data += b_count;
6684 len -= b_count;
6685 offset += b_count;
6687 while (len >= 4) {
6688 val = nr64(ESPC_NCR(offset / 4));
6689 memcpy(data, &val, 4);
6690 data += 4;
6691 len -= 4;
6692 offset += 4;
6694 if (len) {
6695 val = nr64(ESPC_NCR(offset / 4));
6696 memcpy(data, &val, len);
6698 return 0;
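/* The helpers below translate between the ethtool flow-hash interface
 * and the NIU classifier: flow types map to CLASS_CODE_* values, and
 * RXH_* field selectors map to and from FLOW_KEY_* bits.
 */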
6701 static int niu_ethflow_to_class(int flow_type, u64 *class)
6703 switch (flow_type) {
6704 case TCP_V4_FLOW:
6705 *class = CLASS_CODE_TCP_IPV4;
6706 break;
6707 case UDP_V4_FLOW:
6708 *class = CLASS_CODE_UDP_IPV4;
6709 break;
6710 case AH_ESP_V4_FLOW:
6711 *class = CLASS_CODE_AH_ESP_IPV4;
6712 break;
6713 case SCTP_V4_FLOW:
6714 *class = CLASS_CODE_SCTP_IPV4;
6715 break;
6716 case TCP_V6_FLOW:
6717 *class = CLASS_CODE_TCP_IPV6;
6718 break;
6719 case UDP_V6_FLOW:
6720 *class = CLASS_CODE_UDP_IPV6;
6721 break;
6722 case AH_ESP_V6_FLOW:
6723 *class = CLASS_CODE_AH_ESP_IPV6;
6724 break;
6725 case SCTP_V6_FLOW:
6726 *class = CLASS_CODE_SCTP_IPV6;
6727 break;
6728 default:
6729 return 0;
6732 return 1;
6735 static u64 niu_flowkey_to_ethflow(u64 flow_key)
6737 u64 ethflow = 0;
6739 if (flow_key & FLOW_KEY_PORT)
6740 ethflow |= RXH_DEV_PORT;
6741 if (flow_key & FLOW_KEY_L2DA)
6742 ethflow |= RXH_L2DA;
6743 if (flow_key & FLOW_KEY_VLAN)
6744 ethflow |= RXH_VLAN;
6745 if (flow_key & FLOW_KEY_IPSA)
6746 ethflow |= RXH_IP_SRC;
6747 if (flow_key & FLOW_KEY_IPDA)
6748 ethflow |= RXH_IP_DST;
6749 if (flow_key & FLOW_KEY_PROTO)
6750 ethflow |= RXH_L3_PROTO;
6751 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
6752 ethflow |= RXH_L4_B_0_1;
6753 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
6754 ethflow |= RXH_L4_B_2_3;
6756 return ethflow;
6760 static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
6762 u64 key = 0;
6764 if (ethflow & RXH_DEV_PORT)
6765 key |= FLOW_KEY_PORT;
6766 if (ethflow & RXH_L2DA)
6767 key |= FLOW_KEY_L2DA;
6768 if (ethflow & RXH_VLAN)
6769 key |= FLOW_KEY_VLAN;
6770 if (ethflow & RXH_IP_SRC)
6771 key |= FLOW_KEY_IPSA;
6772 if (ethflow & RXH_IP_DST)
6773 key |= FLOW_KEY_IPDA;
6774 if (ethflow & RXH_L3_PROTO)
6775 key |= FLOW_KEY_PROTO;
6776 if (ethflow & RXH_L4_B_0_1)
6777 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
6778 if (ethflow & RXH_L4_B_2_3)
6779 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
6781 *flow_key = key;
6783 return 1;
6787 static int niu_get_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
6789 struct niu *np = netdev_priv(dev);
6790 u64 class;
6792 cmd->data = 0;
6794 if (!niu_ethflow_to_class(cmd->flow_type, &class))
6795 return -EINVAL;
6797 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
6798 TCAM_KEY_DISC)
6799 cmd->data = RXH_DISCARD;
6800 else
6802 cmd->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
6803 CLASS_CODE_USER_PROG1]);
6804 return 0;
6807 static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
6809 struct niu *np = netdev_priv(dev);
6810 u64 class;
6811 u64 flow_key = 0;
6812 unsigned long flags;
6814 if (!niu_ethflow_to_class(cmd->flow_type, &class))
6815 return -EINVAL;
6817 if (class < CLASS_CODE_USER_PROG1 ||
6818 class > CLASS_CODE_SCTP_IPV6)
6819 return -EINVAL;
6821 if (cmd->data & RXH_DISCARD) {
6822 niu_lock_parent(np, flags);
6823 flow_key = np->parent->tcam_key[class -
6824 CLASS_CODE_USER_PROG1];
6825 flow_key |= TCAM_KEY_DISC;
6826 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
6827 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
6828 niu_unlock_parent(np, flags);
6829 return 0;
6830 } else {
6831 /* Discard was set before, but is not set now */
6832 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
6833 TCAM_KEY_DISC) {
6834 niu_lock_parent(np, flags);
6835 flow_key = np->parent->tcam_key[class -
6836 CLASS_CODE_USER_PROG1];
6837 flow_key &= ~TCAM_KEY_DISC;
6838 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
6839 flow_key);
6840 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
6841 flow_key;
6842 niu_unlock_parent(np, flags);
6846 if (!niu_ethflow_to_flowkey(cmd->data, &flow_key))
6847 return -EINVAL;
6849 niu_lock_parent(np, flags);
6850 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
6851 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
6852 niu_unlock_parent(np, flags);
6854 return 0;
6857 static const struct {
6858 const char string[ETH_GSTRING_LEN];
6859 } niu_xmac_stat_keys[] = {
6860 { "tx_frames" },
6861 { "tx_bytes" },
6862 { "tx_fifo_errors" },
6863 { "tx_overflow_errors" },
6864 { "tx_max_pkt_size_errors" },
6865 { "tx_underflow_errors" },
6866 { "rx_local_faults" },
6867 { "rx_remote_faults" },
6868 { "rx_link_faults" },
6869 { "rx_align_errors" },
6870 { "rx_frags" },
6871 { "rx_mcasts" },
6872 { "rx_bcasts" },
6873 { "rx_hist_cnt1" },
6874 { "rx_hist_cnt2" },
6875 { "rx_hist_cnt3" },
6876 { "rx_hist_cnt4" },
6877 { "rx_hist_cnt5" },
6878 { "rx_hist_cnt6" },
6879 { "rx_hist_cnt7" },
6880 { "rx_octets" },
6881 { "rx_code_violations" },
6882 { "rx_len_errors" },
6883 { "rx_crc_errors" },
6884 { "rx_underflows" },
6885 { "rx_overflows" },
6886 { "pause_off_state" },
6887 { "pause_on_state" },
6888 { "pause_received" },
6891 #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys)
6893 static const struct {
6894 const char string[ETH_GSTRING_LEN];
6895 } niu_bmac_stat_keys[] = {
6896 { "tx_underflow_errors" },
6897 { "tx_max_pkt_size_errors" },
6898 { "tx_bytes" },
6899 { "tx_frames" },
6900 { "rx_overflows" },
6901 { "rx_frames" },
6902 { "rx_align_errors" },
6903 { "rx_crc_errors" },
6904 { "rx_len_errors" },
6905 { "pause_off_state" },
6906 { "pause_on_state" },
6907 { "pause_received" },
6910 #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys)
6912 static const struct {
6913 const char string[ETH_GSTRING_LEN];
6914 } niu_rxchan_stat_keys[] = {
6915 { "rx_channel" },
6916 { "rx_packets" },
6917 { "rx_bytes" },
6918 { "rx_dropped" },
6919 { "rx_errors" },
6922 #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys)
6924 static const struct {
6925 const char string[ETH_GSTRING_LEN];
6926 } niu_txchan_stat_keys[] = {
6927 { "tx_channel" },
6928 { "tx_packets" },
6929 { "tx_bytes" },
6930 { "tx_errors" },
6933 #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys)
6935 static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
6937 struct niu *np = netdev_priv(dev);
6938 int i;
6940 if (stringset != ETH_SS_STATS)
6941 return;
6943 if (np->flags & NIU_FLAGS_XMAC) {
6944 memcpy(data, niu_xmac_stat_keys,
6945 sizeof(niu_xmac_stat_keys));
6946 data += sizeof(niu_xmac_stat_keys);
6947 } else {
6948 memcpy(data, niu_bmac_stat_keys,
6949 sizeof(niu_bmac_stat_keys));
6950 data += sizeof(niu_bmac_stat_keys);
6952 for (i = 0; i < np->num_rx_rings; i++) {
6953 memcpy(data, niu_rxchan_stat_keys,
6954 sizeof(niu_rxchan_stat_keys));
6955 data += sizeof(niu_rxchan_stat_keys);
6957 for (i = 0; i < np->num_tx_rings; i++) {
6958 memcpy(data, niu_txchan_stat_keys,
6959 sizeof(niu_txchan_stat_keys));
6960 data += sizeof(niu_txchan_stat_keys);
6964 static int niu_get_stats_count(struct net_device *dev)
6966 struct niu *np = netdev_priv(dev);
6968 return ((np->flags & NIU_FLAGS_XMAC ?
6969 NUM_XMAC_STAT_KEYS :
6970 NUM_BMAC_STAT_KEYS) +
6971 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
6972 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS));
6975 static void niu_get_ethtool_stats(struct net_device *dev,
6976 struct ethtool_stats *stats, u64 *data)
6978 struct niu *np = netdev_priv(dev);
6979 int i;
6981 niu_sync_mac_stats(np);
6982 if (np->flags & NIU_FLAGS_XMAC) {
6983 memcpy(data, &np->mac_stats.xmac,
6984 sizeof(struct niu_xmac_stats));
6985 data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
6986 } else {
6987 memcpy(data, &np->mac_stats.bmac,
6988 sizeof(struct niu_bmac_stats));
6989 data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
6991 for (i = 0; i < np->num_rx_rings; i++) {
6992 struct rx_ring_info *rp = &np->rx_rings[i];
6994 data[0] = rp->rx_channel;
6995 data[1] = rp->rx_packets;
6996 data[2] = rp->rx_bytes;
6997 data[3] = rp->rx_dropped;
6998 data[4] = rp->rx_errors;
6999 data += 5;
7001 for (i = 0; i < np->num_tx_rings; i++) {
7002 struct tx_ring_info *rp = &np->tx_rings[i];
7004 data[0] = rp->tx_channel;
7005 data[1] = rp->tx_packets;
7006 data[2] = rp->tx_bytes;
7007 data[3] = rp->tx_errors;
7008 data += 4;
7012 static u64 niu_led_state_save(struct niu *np)
7014 if (np->flags & NIU_FLAGS_XMAC)
7015 return nr64_mac(XMAC_CONFIG);
7016 else
7017 return nr64_mac(BMAC_XIF_CONFIG);
7020 static void niu_led_state_restore(struct niu *np, u64 val)
7022 if (np->flags & NIU_FLAGS_XMAC)
7023 nw64_mac(XMAC_CONFIG, val);
7024 else
7025 nw64_mac(BMAC_XIF_CONFIG, val);
7028 static void niu_force_led(struct niu *np, int on)
7030 u64 val, reg, bit;
7032 if (np->flags & NIU_FLAGS_XMAC) {
7033 reg = XMAC_CONFIG;
7034 bit = XMAC_CONFIG_FORCE_LED_ON;
7035 } else {
7036 reg = BMAC_XIF_CONFIG;
7037 bit = BMAC_XIF_CONFIG_LINK_LED;
7040 val = nr64_mac(reg);
7041 if (on)
7042 val |= bit;
7043 else
7044 val &= ~bit;
7045 nw64_mac(reg, val);
7048 static int niu_phys_id(struct net_device *dev, u32 data)
7050 struct niu *np = netdev_priv(dev);
7051 u64 orig_led_state;
7052 int i;
7054 if (!netif_running(dev))
7055 return -EAGAIN;
7057 if (data == 0)
7058 data = 2;
7060 orig_led_state = niu_led_state_save(np);
7061 for (i = 0; i < (data * 2); i++) {
7062 int on = ((i % 2) == 0);
7064 niu_force_led(np, on);
7066 if (msleep_interruptible(500))
7067 break;
7069 niu_led_state_restore(np, orig_led_state);
7071 return 0;
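/* Usage note: this backs "ethtool -p <iface> [seconds]"; the loop toggles
 * the LED every 500 ms so the blink lasts roughly the requested number of
 * seconds, with a zero duration treated as two seconds here.
 */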
7074 static const struct ethtool_ops niu_ethtool_ops = {
7075 .get_drvinfo = niu_get_drvinfo,
7076 .get_link = ethtool_op_get_link,
7077 .get_msglevel = niu_get_msglevel,
7078 .set_msglevel = niu_set_msglevel,
7079 .get_eeprom_len = niu_get_eeprom_len,
7080 .get_eeprom = niu_get_eeprom,
7081 .get_settings = niu_get_settings,
7082 .set_settings = niu_set_settings,
7083 .get_strings = niu_get_strings,
7084 .get_stats_count = niu_get_stats_count,
7085 .get_ethtool_stats = niu_get_ethtool_stats,
7086 .phys_id = niu_phys_id,
7087 .get_rxhash = niu_get_hash_opts,
7088 .set_rxhash = niu_set_hash_opts,
7091 static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
7092 int ldg, int ldn)
7094 if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
7095 return -EINVAL;
7096 if (ldn < 0 || ldn > LDN_MAX)
7097 return -EINVAL;
7099 parent->ldg_map[ldn] = ldg;
7101 if (np->parent->plat_type == PLAT_TYPE_NIU) {
7102 /* On N2 NIU, the ldn-->ldg assignments are set up and fixed by
7103 * the firmware, and we're not supposed to change them.
7104 * Validate the mapping, because if it's wrong we probably
7105 * won't get any interrupts and that's painful to debug.
7107 if (nr64(LDG_NUM(ldn)) != ldg) {
7108 dev_err(np->device, PFX "Port %u, mis-matched "
7109 "LDG assignment "
7110 "for ldn %d, should be %d is %llu\n",
7111 np->port, ldn, ldg,
7112 (unsigned long long) nr64(LDG_NUM(ldn)));
7113 return -EINVAL;
7115 } else
7116 nw64(LDG_NUM(ldn), ldg);
7118 return 0;
7121 static int niu_set_ldg_timer_res(struct niu *np, int res)
7123 if (res < 0 || res > LDG_TIMER_RES_VAL)
7124 return -EINVAL;
7127 nw64(LDG_TIMER_RES, res);
7129 return 0;
7132 static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
7134 if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
7135 (func < 0 || func > 3) ||
7136 (vector < 0 || vector > 0x1f))
7137 return -EINVAL;
7139 nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
7141 return 0;
7144 static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
7146 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
7147 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
7148 int limit;
7150 if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
7151 return -EINVAL;
7153 frame = frame_base;
7154 nw64(ESPC_PIO_STAT, frame);
7155 limit = 64;
7156 do {
7157 udelay(5);
7158 frame = nr64(ESPC_PIO_STAT);
7159 if (frame & ESPC_PIO_STAT_READ_END)
7160 break;
7161 } while (limit--);
7162 if (!(frame & ESPC_PIO_STAT_READ_END)) {
7163 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
7164 (unsigned long long) frame);
7165 return -ENODEV;
7168 frame = frame_base;
7169 nw64(ESPC_PIO_STAT, frame);
7170 limit = 64;
7171 do {
7172 udelay(5);
7173 frame = nr64(ESPC_PIO_STAT);
7174 if (frame & ESPC_PIO_STAT_READ_END)
7175 break;
7176 } while (limit--);
7177 if (!(frame & ESPC_PIO_STAT_READ_END)) {
7178 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
7179 (unsigned long long) frame);
7180 return -ENODEV;
7183 frame = nr64(ESPC_PIO_STAT);
7184 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
7187 static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
7189 int err = niu_pci_eeprom_read(np, off);
7190 u16 val;
7192 if (err < 0)
7193 return err;
7194 val = (err << 8);
7195 err = niu_pci_eeprom_read(np, off + 1);
7196 if (err < 0)
7197 return err;
7198 val |= (err & 0xff);
7200 return val;
7203 static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
7205 int err = niu_pci_eeprom_read(np, off);
7206 u16 val;
7208 if (err < 0)
7209 return err;
7211 val = (err & 0xff);
7212 err = niu_pci_eeprom_read(np, off + 1);
7213 if (err < 0)
7214 return err;
7216 val |= (err & 0xff) << 8;
7218 return val;
7221 static int __devinit niu_pci_vpd_get_propname(struct niu *np,
7222 u32 off,
7223 char *namebuf,
7224 int namebuf_len)
7226 int i;
7228 for (i = 0; i < namebuf_len; i++) {
7229 int err = niu_pci_eeprom_read(np, off + i);
7230 if (err < 0)
7231 return err;
7232 *namebuf++ = err;
7233 if (!err)
7234 break;
7236 if (i >= namebuf_len)
7237 return -EINVAL;
7239 return i + 1;
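/* Illustration (hypothetical VPD contents): a "version" property such as
 * "... FCode 1.11 ..." makes niu_vpd_parse_version() below record
 * fcode_major == 1 and fcode_minor == 11, and the VPD is marked valid
 * (NIU_FLAGS_VPD_VALID) only when that version is at least
 * NIU_VPD_MIN_MAJOR.NIU_VPD_MIN_MINOR.
 */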
7242 static void __devinit niu_vpd_parse_version(struct niu *np)
7244 struct niu_vpd *vpd = &np->vpd;
7245 int len = strlen(vpd->version) + 1;
7246 const char *s = vpd->version;
7247 int i;
7249 for (i = 0; i < len - 5; i++) {
7250 if (!strncmp(s + i, "FCode ", 5))
7251 break;
7253 if (i >= len - 5)
7254 return;
7256 s += i + 5;
7257 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
7259 niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
7260 vpd->fcode_major, vpd->fcode_minor);
7261 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
7262 (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
7263 vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
7264 np->flags |= NIU_FLAGS_VPD_VALID;
7267 /* ESPC_PIO_EN_ENABLE must be set */
7268 static int __devinit niu_pci_vpd_scan_props(struct niu *np,
7269 u32 start, u32 end)
7271 unsigned int found_mask = 0;
7272 #define FOUND_MASK_MODEL 0x00000001
7273 #define FOUND_MASK_BMODEL 0x00000002
7274 #define FOUND_MASK_VERS 0x00000004
7275 #define FOUND_MASK_MAC 0x00000008
7276 #define FOUND_MASK_NMAC 0x00000010
7277 #define FOUND_MASK_PHY 0x00000020
7278 #define FOUND_MASK_ALL 0x0000003f
7280 niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n",
7281 start, end);
7282 while (start < end) {
7283 int len, err, instance, type, prop_len;
7284 char namebuf[64];
7285 u8 *prop_buf;
7286 int max_len;
7288 if (found_mask == FOUND_MASK_ALL) {
7289 niu_vpd_parse_version(np);
7290 return 1;
7293 err = niu_pci_eeprom_read(np, start + 2);
7294 if (err < 0)
7295 return err;
7296 len = err;
7297 start += 3;
7299 instance = niu_pci_eeprom_read(np, start);
7300 type = niu_pci_eeprom_read(np, start + 3);
7301 prop_len = niu_pci_eeprom_read(np, start + 4);
7302 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
7303 if (err < 0)
7304 return err;
7306 prop_buf = NULL;
7307 max_len = 0;
7308 if (!strcmp(namebuf, "model")) {
7309 prop_buf = np->vpd.model;
7310 max_len = NIU_VPD_MODEL_MAX;
7311 found_mask |= FOUND_MASK_MODEL;
7312 } else if (!strcmp(namebuf, "board-model")) {
7313 prop_buf = np->vpd.board_model;
7314 max_len = NIU_VPD_BD_MODEL_MAX;
7315 found_mask |= FOUND_MASK_BMODEL;
7316 } else if (!strcmp(namebuf, "version")) {
7317 prop_buf = np->vpd.version;
7318 max_len = NIU_VPD_VERSION_MAX;
7319 found_mask |= FOUND_MASK_VERS;
7320 } else if (!strcmp(namebuf, "local-mac-address")) {
7321 prop_buf = np->vpd.local_mac;
7322 max_len = ETH_ALEN;
7323 found_mask |= FOUND_MASK_MAC;
7324 } else if (!strcmp(namebuf, "num-mac-addresses")) {
7325 prop_buf = &np->vpd.mac_num;
7326 max_len = 1;
7327 found_mask |= FOUND_MASK_NMAC;
7328 } else if (!strcmp(namebuf, "phy-type")) {
7329 prop_buf = np->vpd.phy_type;
7330 max_len = NIU_VPD_PHY_TYPE_MAX;
7331 found_mask |= FOUND_MASK_PHY;
7334 if (max_len && prop_len > max_len) {
7335 dev_err(np->device, PFX "Property '%s' length (%d) is "
7336 "too long.\n", namebuf, prop_len);
7337 return -EINVAL;
7340 if (prop_buf) {
7341 u32 off = start + 5 + err;
7342 int i;
7344 niudbg(PROBE, "VPD_SCAN: Reading in property [%s] "
7345 "len[%d]\n", namebuf, prop_len);
7346 for (i = 0; i < prop_len; i++)
7347 *prop_buf++ = niu_pci_eeprom_read(np, off + i);
7350 start += len;
7353 return 0;
7356 /* ESPC_PIO_EN_ENABLE must be set */
7357 static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
7359 u32 offset;
7360 int err;
7362 err = niu_pci_eeprom_read16_swp(np, start + 1);
7363 if (err < 0)
7364 return;
7366 offset = err + 3;
7368 while (start + offset < ESPC_EEPROM_SIZE) {
7369 u32 here = start + offset;
7370 u32 end;
7372 err = niu_pci_eeprom_read(np, here);
7373 if (err != 0x90)
7374 return;
7376 err = niu_pci_eeprom_read16_swp(np, here + 1);
7377 if (err < 0)
7378 return;
7380 here = start + offset + 3;
7381 end = start + offset + err;
7383 offset += err;
7385 err = niu_pci_vpd_scan_props(np, here, end);
7386 if (err < 0 || err == 1)
7387 return;
7391 /* ESPC_PIO_EN_ENABLE must be set */
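/* A minimal sketch of the intended calling sequence (it mirrors what
 * niu_get_invariants() does further below); the EEPROM PIO interface has
 * to stay enabled for the whole VPD walk:
 *
 *	nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
 *	offset = niu_pci_vpd_offset(np);
 *	if (offset)
 *		niu_pci_vpd_fetch(np, offset);
 *	nw64(ESPC_PIO_EN, 0);
 */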
7392 static u32 __devinit niu_pci_vpd_offset(struct niu *np)
7394 u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
7395 int err;
7397 while (start < end) {
7398 ret = start;
7400 /* ROM header signature? */
7401 err = niu_pci_eeprom_read16(np, start + 0);
7402 if (err != 0x55aa)
7403 return 0;
7405 /* Apply offset to PCI data structure. */
7406 err = niu_pci_eeprom_read16(np, start + 23);
7407 if (err < 0)
7408 return 0;
7409 start += err;
7411 /* Check for "PCIR" signature. */
7412 err = niu_pci_eeprom_read16(np, start + 0);
7413 if (err != 0x5043)
7414 return 0;
7415 err = niu_pci_eeprom_read16(np, start + 2);
7416 if (err != 0x4952)
7417 return 0;
7419 /* Check for OBP image type. */
7420 err = niu_pci_eeprom_read(np, start + 20);
7421 if (err < 0)
7422 return 0;
7423 if (err != 0x01) {
7424 err = niu_pci_eeprom_read(np, ret + 2);
7425 if (err < 0)
7426 return 0;
7428 start = ret + (err * 512);
7429 continue;
7432 err = niu_pci_eeprom_read16_swp(np, start + 8);
7433 if (err < 0)
7434 return err;
7435 ret += err;
7437 err = niu_pci_eeprom_read(np, ret + 0);
7438 if (err != 0x82)
7439 return 0;
7441 return ret;
7444 return 0;
7447 static int __devinit niu_phy_type_prop_decode(struct niu *np,
7448 const char *phy_prop)
7450 if (!strcmp(phy_prop, "mif")) {
7451 /* 1G copper, MII */
7452 np->flags &= ~(NIU_FLAGS_FIBER |
7453 NIU_FLAGS_10G);
7454 np->mac_xcvr = MAC_XCVR_MII;
7455 } else if (!strcmp(phy_prop, "xgf")) {
7456 /* 10G fiber, XPCS */
7457 np->flags |= (NIU_FLAGS_10G |
7458 NIU_FLAGS_FIBER);
7459 np->mac_xcvr = MAC_XCVR_XPCS;
7460 } else if (!strcmp(phy_prop, "pcs")) {
7461 /* 1G fiber, PCS */
7462 np->flags &= ~NIU_FLAGS_10G;
7463 np->flags |= NIU_FLAGS_FIBER;
7464 np->mac_xcvr = MAC_XCVR_PCS;
7465 } else if (!strcmp(phy_prop, "xgc")) {
7466 /* 10G copper, XPCS */
7467 np->flags |= NIU_FLAGS_10G;
7468 np->flags &= ~NIU_FLAGS_FIBER;
7469 np->mac_xcvr = MAC_XCVR_XPCS;
7470 } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
7471 /* 10G Serdes or 1G Serdes, default to 10G */
7472 np->flags |= NIU_FLAGS_10G;
7473 np->flags &= ~NIU_FLAGS_FIBER;
7474 np->flags |= NIU_FLAGS_XCVR_SERDES;
7475 np->mac_xcvr = MAC_XCVR_XPCS;
7476 } else {
7477 return -EINVAL;
7479 return 0;
7482 static int niu_pci_vpd_get_nports(struct niu *np)
7484 int ports = 0;
7486 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
7487 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
7488 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
7489 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
7490 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
7491 ports = 4;
7492 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
7493 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
7494 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
7495 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
7496 ports = 2;
7499 return ports;
7502 static void __devinit niu_pci_vpd_validate(struct niu *np)
7504 struct net_device *dev = np->dev;
7505 struct niu_vpd *vpd = &np->vpd;
7506 u8 val8;
7508 if (!is_valid_ether_addr(&vpd->local_mac[0])) {
7509 dev_err(np->device, PFX "VPD MAC invalid, "
7510 "falling back to SPROM.\n");
7512 np->flags &= ~NIU_FLAGS_VPD_VALID;
7513 return;
7516 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
7517 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
7518 np->flags |= NIU_FLAGS_10G;
7519 np->flags &= ~NIU_FLAGS_FIBER;
7520 np->flags |= NIU_FLAGS_XCVR_SERDES;
7521 np->mac_xcvr = MAC_XCVR_PCS;
7522 if (np->port > 1) {
7523 np->flags |= NIU_FLAGS_FIBER;
7524 np->flags &= ~NIU_FLAGS_10G;
7526 if (np->flags & NIU_FLAGS_10G)
7527 np->mac_xcvr = MAC_XCVR_XPCS;
7528 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
7529 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
7530 NIU_FLAGS_HOTPLUG_PHY);
7531 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
7532 dev_err(np->device, PFX "Illegal phy string [%s].\n",
7533 np->vpd.phy_type);
7534 dev_err(np->device, PFX "Falling back to SPROM.\n");
7535 np->flags &= ~NIU_FLAGS_VPD_VALID;
7536 return;
7539 memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
7541 val8 = dev->perm_addr[5];
7542 dev->perm_addr[5] += np->port;
7543 if (dev->perm_addr[5] < val8)
7544 dev->perm_addr[4]++;
7546 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
7549 static int __devinit niu_pci_probe_sprom(struct niu *np)
7551 struct net_device *dev = np->dev;
7552 int len, i;
7553 u64 val, sum;
7554 u8 val8;
7556 val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
7557 val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
7558 len = val / 4;
7560 np->eeprom_len = len;
7562 niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val);
7564 sum = 0;
7565 for (i = 0; i < len; i++) {
7566 val = nr64(ESPC_NCR(i));
7567 sum += (val >> 0) & 0xff;
7568 sum += (val >> 8) & 0xff;
7569 sum += (val >> 16) & 0xff;
7570 sum += (val >> 24) & 0xff;
7572 niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff));
7573 if ((sum & 0xff) != 0xab) {
7574 dev_err(np->device, PFX "Bad SPROM checksum "
7575 "(%x, should be 0xab)\n", (int) (sum & 0xff));
7576 return -EINVAL;
7579 val = nr64(ESPC_PHY_TYPE);
7580 switch (np->port) {
7581 case 0:
7582 val8 = (val & ESPC_PHY_TYPE_PORT0) >>
7583 ESPC_PHY_TYPE_PORT0_SHIFT;
7584 break;
7585 case 1:
7586 val8 = (val & ESPC_PHY_TYPE_PORT1) >>
7587 ESPC_PHY_TYPE_PORT1_SHIFT;
7588 break;
7589 case 2:
7590 val8 = (val & ESPC_PHY_TYPE_PORT2) >>
7591 ESPC_PHY_TYPE_PORT2_SHIFT;
7592 break;
7593 case 3:
7594 val8 = (val & ESPC_PHY_TYPE_PORT3) >>
7595 ESPC_PHY_TYPE_PORT3_SHIFT;
7596 break;
7597 default:
7598 dev_err(np->device, PFX "Bogus port number %u\n",
7599 np->port);
7600 return -EINVAL;
7602 niudbg(PROBE, "SPROM: PHY type %x\n", val8);
7604 switch (val8) {
7605 case ESPC_PHY_TYPE_1G_COPPER:
7606 /* 1G copper, MII */
7607 np->flags &= ~(NIU_FLAGS_FIBER |
7608 NIU_FLAGS_10G);
7609 np->mac_xcvr = MAC_XCVR_MII;
7610 break;
7612 case ESPC_PHY_TYPE_1G_FIBER:
7613 /* 1G fiber, PCS */
7614 np->flags &= ~NIU_FLAGS_10G;
7615 np->flags |= NIU_FLAGS_FIBER;
7616 np->mac_xcvr = MAC_XCVR_PCS;
7617 break;
7619 case ESPC_PHY_TYPE_10G_COPPER:
7620 /* 10G copper, XPCS */
7621 np->flags |= NIU_FLAGS_10G;
7622 np->flags &= ~NIU_FLAGS_FIBER;
7623 np->mac_xcvr = MAC_XCVR_XPCS;
7624 break;
7626 case ESPC_PHY_TYPE_10G_FIBER:
7627 /* 10G fiber, XPCS */
7628 np->flags |= (NIU_FLAGS_10G |
7629 NIU_FLAGS_FIBER);
7630 np->mac_xcvr = MAC_XCVR_XPCS;
7631 break;
7633 default:
7634 dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8);
7635 return -EINVAL;
7638 val = nr64(ESPC_MAC_ADDR0);
7639 niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n",
7640 (unsigned long long) val);
7641 dev->perm_addr[0] = (val >> 0) & 0xff;
7642 dev->perm_addr[1] = (val >> 8) & 0xff;
7643 dev->perm_addr[2] = (val >> 16) & 0xff;
7644 dev->perm_addr[3] = (val >> 24) & 0xff;
7646 val = nr64(ESPC_MAC_ADDR1);
7647 niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n",
7648 (unsigned long long) val);
7649 dev->perm_addr[4] = (val >> 0) & 0xff;
7650 dev->perm_addr[5] = (val >> 8) & 0xff;
7652 if (!is_valid_ether_addr(&dev->perm_addr[0])) {
7653 dev_err(np->device, PFX "SPROM MAC address invalid\n");
7654 dev_err(np->device, PFX "[ \n");
7655 for (i = 0; i < 6; i++)
7656 printk("%02x ", dev->perm_addr[i]);
7657 printk("]\n");
7658 return -EINVAL;
7661 val8 = dev->perm_addr[5];
7662 dev->perm_addr[5] += np->port;
7663 if (dev->perm_addr[5] < val8)
7664 dev->perm_addr[4]++;
7666 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
7668 val = nr64(ESPC_MOD_STR_LEN);
7669 niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n",
7670 (unsigned long long) val);
7671 if (val >= 8 * 4)
7672 return -EINVAL;
7674 for (i = 0; i < val; i += 4) {
7675 u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
7677 np->vpd.model[i + 3] = (tmp >> 0) & 0xff;
7678 np->vpd.model[i + 2] = (tmp >> 8) & 0xff;
7679 np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
7680 np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
7682 np->vpd.model[val] = '\0';
7684 val = nr64(ESPC_BD_MOD_STR_LEN);
7685 niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n",
7686 (unsigned long long) val);
7687 if (val >= 4 * 4)
7688 return -EINVAL;
7690 for (i = 0; i < val; i += 4) {
7691 u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
7693 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff;
7694 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff;
7695 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
7696 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
7698 np->vpd.board_model[val] = '\0';
7700 np->vpd.mac_num =
7701 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
7702 niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n",
7703 np->vpd.mac_num);
7705 return 0;
7708 static int __devinit niu_get_and_validate_port(struct niu *np)
7710 struct niu_parent *parent = np->parent;
7712 if (np->port <= 1)
7713 np->flags |= NIU_FLAGS_XMAC;
7715 if (!parent->num_ports) {
7716 if (parent->plat_type == PLAT_TYPE_NIU) {
7717 parent->num_ports = 2;
7718 } else {
7719 parent->num_ports = niu_pci_vpd_get_nports(np);
7720 if (!parent->num_ports) {
7721 /* Fall back to SPROM as a last resort.
7722 * This will fail on most cards.
7724 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
7725 ESPC_NUM_PORTS_MACS_VAL;
7727 /* All of the current probing methods fail on
7728 * Maramba on-board parts.
7730 if (!parent->num_ports)
7731 parent->num_ports = 4;
7736 niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
7737 np->port, parent->num_ports);
7738 if (np->port >= parent->num_ports)
7739 return -ENODEV;
7741 return 0;
7744 static int __devinit phy_record(struct niu_parent *parent,
7745 struct phy_probe_info *p,
7746 int dev_id_1, int dev_id_2, u8 phy_port,
7747 int type)
7749 u32 id = (dev_id_1 << 16) | dev_id_2;
7750 u8 idx;
7752 if (dev_id_1 < 0 || dev_id_2 < 0)
7753 return 0;
7754 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
7755 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
7756 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
7757 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
7758 return 0;
7759 } else {
7760 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
7761 return 0;
7764 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
7765 parent->index, id,
7766 (type == PHY_TYPE_PMA_PMD ?
7767 "PMA/PMD" :
7768 (type == PHY_TYPE_PCS ?
7769 "PCS" : "MII")),
7770 phy_port);
7772 if (p->cur[type] >= NIU_MAX_PORTS) {
7773 printk(KERN_ERR PFX "Too many PHY ports.\n");
7774 return -EINVAL;
7776 idx = p->cur[type];
7777 p->phy_id[type][idx] = id;
7778 p->phy_port[type][idx] = phy_port;
7779 p->cur[type] = idx + 1;
7780 return 0;
7783 static int __devinit port_has_10g(struct phy_probe_info *p, int port)
7785 int i;
7787 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
7788 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
7789 return 1;
7791 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
7792 if (p->phy_port[PHY_TYPE_PCS][i] == port)
7793 return 1;
7796 return 0;
7799 static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
7801 int port, cnt;
7803 cnt = 0;
7804 *lowest = 32;
7805 for (port = 8; port < 32; port++) {
7806 if (port_has_10g(p, port)) {
7807 if (!cnt)
7808 *lowest = port;
7809 cnt++;
7813 return cnt;
7816 static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
7818 *lowest = 32;
7819 if (p->cur[PHY_TYPE_MII])
7820 *lowest = p->phy_port[PHY_TYPE_MII][0];
7822 return p->cur[PHY_TYPE_MII];
7825 static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
7827 int num_ports = parent->num_ports;
7828 int i;
7830 for (i = 0; i < num_ports; i++) {
7831 parent->rxchan_per_port[i] = (16 / num_ports);
7832 parent->txchan_per_port[i] = (16 / num_ports);
7834 pr_info(PFX "niu%d: Port %u [%u RX chans] "
7835 "[%u TX chans]\n",
7836 parent->index, i,
7837 parent->rxchan_per_port[i],
7838 parent->txchan_per_port[i]);
7842 static void __devinit niu_divide_channels(struct niu_parent *parent,
7843 int num_10g, int num_1g)
7845 int num_ports = parent->num_ports;
7846 int rx_chans_per_10g, rx_chans_per_1g;
7847 int tx_chans_per_10g, tx_chans_per_1g;
7848 int i, tot_rx, tot_tx;
7850 if (!num_10g || !num_1g) {
7851 rx_chans_per_10g = rx_chans_per_1g =
7852 (NIU_NUM_RXCHAN / num_ports);
7853 tx_chans_per_10g = tx_chans_per_1g =
7854 (NIU_NUM_TXCHAN / num_ports);
7855 } else {
7856 rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
7857 rx_chans_per_10g = (NIU_NUM_RXCHAN -
7858 (rx_chans_per_1g * num_1g)) /
7859 num_10g;
7861 tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
7862 tx_chans_per_10g = (NIU_NUM_TXCHAN -
7863 (tx_chans_per_1g * num_1g)) /
7864 num_10g;
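/* Worked example (a sketch, assuming NIU_NUM_RXCHAN == 16 and
 * NIU_NUM_TXCHAN == 24): on a mixed board with two 10G and two 1G ports,
 * each 1G port gets 16/8 = 2 RX and 24/6 = 4 TX channels, leaving
 * (16 - 2*2)/2 = 6 RX and (24 - 4*2)/2 = 8 TX channels per 10G port.
 */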
7867 tot_rx = tot_tx = 0;
7868 for (i = 0; i < num_ports; i++) {
7869 int type = phy_decode(parent->port_phy, i);
7871 if (type == PORT_TYPE_10G) {
7872 parent->rxchan_per_port[i] = rx_chans_per_10g;
7873 parent->txchan_per_port[i] = tx_chans_per_10g;
7874 } else {
7875 parent->rxchan_per_port[i] = rx_chans_per_1g;
7876 parent->txchan_per_port[i] = tx_chans_per_1g;
7878 pr_info(PFX "niu%d: Port %u [%u RX chans] "
7879 "[%u TX chans]\n",
7880 parent->index, i,
7881 parent->rxchan_per_port[i],
7882 parent->txchan_per_port[i]);
7883 tot_rx += parent->rxchan_per_port[i];
7884 tot_tx += parent->txchan_per_port[i];
7887 if (tot_rx > NIU_NUM_RXCHAN) {
7888 printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), "
7889 "resetting to one per port.\n",
7890 parent->index, tot_rx);
7891 for (i = 0; i < num_ports; i++)
7892 parent->rxchan_per_port[i] = 1;
7894 if (tot_tx > NIU_NUM_TXCHAN) {
7895 printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), "
7896 "resetting to one per port.\n",
7897 parent->index, tot_tx);
7898 for (i = 0; i < num_ports; i++)
7899 parent->txchan_per_port[i] = 1;
7901 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
7902 printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, "
7903 "RX[%d] TX[%d]\n",
7904 parent->index, tot_rx, tot_tx);
7908 static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
7909 int num_10g, int num_1g)
7911 int i, num_ports = parent->num_ports;
7912 int rdc_group, rdc_groups_per_port;
7913 int rdc_channel_base;
7915 rdc_group = 0;
7916 rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
7918 rdc_channel_base = 0;
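/* In short: each port owns NIU_NUM_RDC_TABLES / num_ports consecutive RDC
 * tables, every slot of those tables is filled round-robin with that
 * port's RX channels starting at rdc_channel_base, and the first such
 * channel also becomes the port's default RDC.
 */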
7920 for (i = 0; i < num_ports; i++) {
7921 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
7922 int grp, num_channels = parent->rxchan_per_port[i];
7923 int this_channel_offset;
7925 tp->first_table_num = rdc_group;
7926 tp->num_tables = rdc_groups_per_port;
7927 this_channel_offset = 0;
7928 for (grp = 0; grp < tp->num_tables; grp++) {
7929 struct rdc_table *rt = &tp->tables[grp];
7930 int slot;
7932 pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ",
7933 parent->index, i, tp->first_table_num + grp);
7934 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
7935 rt->rxdma_channel[slot] =
7936 rdc_channel_base + this_channel_offset;
7938 printk("%d ", rt->rxdma_channel[slot]);
7940 if (++this_channel_offset == num_channels)
7941 this_channel_offset = 0;
7943 printk("]\n");
7946 parent->rdc_default[i] = rdc_channel_base;
7948 rdc_channel_base += num_channels;
7949 rdc_group += rdc_groups_per_port;
7953 static int __devinit fill_phy_probe_info(struct niu *np,
7954 struct niu_parent *parent,
7955 struct phy_probe_info *info)
7957 unsigned long flags;
7958 int port, err;
7960 memset(info, 0, sizeof(*info));
7962 /* Ports 0 to 7 are reserved for onboard Serdes; probe the rest. */
7963 niu_lock_parent(np, flags);
7964 err = 0;
7965 for (port = 8; port < 32; port++) {
7966 int dev_id_1, dev_id_2;
7968 dev_id_1 = mdio_read(np, port,
7969 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
7970 dev_id_2 = mdio_read(np, port,
7971 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
7972 err = phy_record(parent, info, dev_id_1, dev_id_2, port,
7973 PHY_TYPE_PMA_PMD);
7974 if (err)
7975 break;
7976 dev_id_1 = mdio_read(np, port,
7977 NIU_PCS_DEV_ADDR, MII_PHYSID1);
7978 dev_id_2 = mdio_read(np, port,
7979 NIU_PCS_DEV_ADDR, MII_PHYSID2);
7980 err = phy_record(parent, info, dev_id_1, dev_id_2, port,
7981 PHY_TYPE_PCS);
7982 if (err)
7983 break;
7984 dev_id_1 = mii_read(np, port, MII_PHYSID1);
7985 dev_id_2 = mii_read(np, port, MII_PHYSID2);
7986 err = phy_record(parent, info, dev_id_1, dev_id_2, port,
7987 PHY_TYPE_MII);
7988 if (err)
7989 break;
7991 niu_unlock_parent(np, flags);
7993 return err;
7996 static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7998 struct phy_probe_info *info = &parent->phy_probe_info;
7999 int lowest_10g, lowest_1g;
8000 int num_10g, num_1g;
8001 u32 val;
8002 int err;
8004 num_10g = num_1g = 0;
8006 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8007 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8008 num_10g = 0;
8009 num_1g = 2;
8010 parent->plat_type = PLAT_TYPE_ATCA_CP3220;
8011 parent->num_ports = 4;
8012 val = (phy_encode(PORT_TYPE_1G, 0) |
8013 phy_encode(PORT_TYPE_1G, 1) |
8014 phy_encode(PORT_TYPE_1G, 2) |
8015 phy_encode(PORT_TYPE_1G, 3));
8016 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8017 num_10g = 2;
8018 num_1g = 0;
8019 parent->num_ports = 2;
8020 val = (phy_encode(PORT_TYPE_10G, 0) |
8021 phy_encode(PORT_TYPE_10G, 1));
8022 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
8023 (parent->plat_type == PLAT_TYPE_NIU)) {
8024 /* this is the Monza case */
8025 if (np->flags & NIU_FLAGS_10G) {
8026 val = (phy_encode(PORT_TYPE_10G, 0) |
8027 phy_encode(PORT_TYPE_10G, 1));
8028 } else {
8029 val = (phy_encode(PORT_TYPE_1G, 0) |
8030 phy_encode(PORT_TYPE_1G, 1));
8032 } else {
8033 err = fill_phy_probe_info(np, parent, info);
8034 if (err)
8035 return err;
8037 num_10g = count_10g_ports(info, &lowest_10g);
8038 num_1g = count_1g_ports(info, &lowest_1g);
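/* The switch key simply packs the PHY counts as (num_10g << 4) | num_1g,
 * so e.g. 0x24 means two 10G and four 1G PHYs were detected, while 0x10
 * means a single 10G PHY.
 */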
8040 switch ((num_10g << 4) | num_1g) {
8041 case 0x24:
8042 if (lowest_1g == 10)
8043 parent->plat_type = PLAT_TYPE_VF_P0;
8044 else if (lowest_1g == 26)
8045 parent->plat_type = PLAT_TYPE_VF_P1;
8046 else
8047 goto unknown_vg_1g_port;
8049 /* fallthru */
8050 case 0x22:
8051 val = (phy_encode(PORT_TYPE_10G, 0) |
8052 phy_encode(PORT_TYPE_10G, 1) |
8053 phy_encode(PORT_TYPE_1G, 2) |
8054 phy_encode(PORT_TYPE_1G, 3));
8055 break;
8057 case 0x20:
8058 val = (phy_encode(PORT_TYPE_10G, 0) |
8059 phy_encode(PORT_TYPE_10G, 1));
8060 break;
8062 case 0x10:
8063 val = phy_encode(PORT_TYPE_10G, np->port);
8064 break;
8066 case 0x14:
8067 if (lowest_1g == 10)
8068 parent->plat_type = PLAT_TYPE_VF_P0;
8069 else if (lowest_1g == 26)
8070 parent->plat_type = PLAT_TYPE_VF_P1;
8071 else
8072 goto unknown_vg_1g_port;
8074 /* fallthru */
8075 case 0x13:
8076 if ((lowest_10g & 0x7) == 0)
8077 val = (phy_encode(PORT_TYPE_10G, 0) |
8078 phy_encode(PORT_TYPE_1G, 1) |
8079 phy_encode(PORT_TYPE_1G, 2) |
8080 phy_encode(PORT_TYPE_1G, 3));
8081 else
8082 val = (phy_encode(PORT_TYPE_1G, 0) |
8083 phy_encode(PORT_TYPE_10G, 1) |
8084 phy_encode(PORT_TYPE_1G, 2) |
8085 phy_encode(PORT_TYPE_1G, 3));
8086 break;
8088 case 0x04:
8089 if (lowest_1g == 10)
8090 parent->plat_type = PLAT_TYPE_VF_P0;
8091 else if (lowest_1g == 26)
8092 parent->plat_type = PLAT_TYPE_VF_P1;
8093 else
8094 goto unknown_vg_1g_port;
8096 val = (phy_encode(PORT_TYPE_1G, 0) |
8097 phy_encode(PORT_TYPE_1G, 1) |
8098 phy_encode(PORT_TYPE_1G, 2) |
8099 phy_encode(PORT_TYPE_1G, 3));
8100 break;
8102 default:
8103 printk(KERN_ERR PFX "Unsupported port config "
8104 "10G[%d] 1G[%d]\n",
8105 num_10g, num_1g);
8106 return -EINVAL;
8110 parent->port_phy = val;
8112 if (parent->plat_type == PLAT_TYPE_NIU)
8113 niu_n2_divide_channels(parent);
8114 else
8115 niu_divide_channels(parent, num_10g, num_1g);
8117 niu_divide_rdc_groups(parent, num_10g, num_1g);
8119 return 0;
8121 unknown_vg_1g_port:
8122 printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n",
8123 lowest_1g);
8124 return -EINVAL;
8127 static int __devinit niu_probe_ports(struct niu *np)
8129 struct niu_parent *parent = np->parent;
8130 int err, i;
8132 niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
8133 parent->port_phy);
8135 if (parent->port_phy == PORT_PHY_UNKNOWN) {
8136 err = walk_phys(np, parent);
8137 if (err)
8138 return err;
8140 niu_set_ldg_timer_res(np, 2);
8141 for (i = 0; i <= LDN_MAX; i++)
8142 niu_ldn_irq_enable(np, i, 0);
8145 if (parent->port_phy == PORT_PHY_INVALID)
8146 return -EINVAL;
8148 return 0;
8151 static int __devinit niu_classifier_swstate_init(struct niu *np)
8153 struct niu_classifier *cp = &np->clas;
8155 niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
8156 np->parent->tcam_num_entries);
8158 cp->tcam_index = (u16) np->port;
8159 cp->h1_init = 0xffffffff;
8160 cp->h2_init = 0xffff;
8162 return fflp_early_init(np);
8165 static void __devinit niu_link_config_init(struct niu *np)
8167 struct niu_link_config *lp = &np->link_config;
8169 lp->advertising = (ADVERTISED_10baseT_Half |
8170 ADVERTISED_10baseT_Full |
8171 ADVERTISED_100baseT_Half |
8172 ADVERTISED_100baseT_Full |
8173 ADVERTISED_1000baseT_Half |
8174 ADVERTISED_1000baseT_Full |
8175 ADVERTISED_10000baseT_Full |
8176 ADVERTISED_Autoneg);
8177 lp->speed = lp->active_speed = SPEED_INVALID;
8178 lp->duplex = lp->active_duplex = DUPLEX_INVALID;
8179 #if 0
8180 lp->loopback_mode = LOOPBACK_MAC;
8181 lp->active_speed = SPEED_10000;
8182 lp->active_duplex = DUPLEX_FULL;
8183 #else
8184 lp->loopback_mode = LOOPBACK_DISABLED;
8185 #endif
8188 static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
8190 switch (np->port) {
8191 case 0:
8192 np->mac_regs = np->regs + XMAC_PORT0_OFF;
8193 np->ipp_off = 0x00000;
8194 np->pcs_off = 0x04000;
8195 np->xpcs_off = 0x02000;
8196 break;
8198 case 1:
8199 np->mac_regs = np->regs + XMAC_PORT1_OFF;
8200 np->ipp_off = 0x08000;
8201 np->pcs_off = 0x0a000;
8202 np->xpcs_off = 0x08000;
8203 break;
8205 case 2:
8206 np->mac_regs = np->regs + BMAC_PORT2_OFF;
8207 np->ipp_off = 0x04000;
8208 np->pcs_off = 0x0e000;
8209 np->xpcs_off = ~0UL;
8210 break;
8212 case 3:
8213 np->mac_regs = np->regs + BMAC_PORT3_OFF;
8214 np->ipp_off = 0x0c000;
8215 np->pcs_off = 0x12000;
8216 np->xpcs_off = ~0UL;
8217 break;
8219 default:
8220 dev_err(np->device, PFX "Port %u is invalid, cannot "
8221 "compute MAC block offset.\n", np->port);
8222 return -EINVAL;
8225 return 0;
8228 static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
8230 struct msix_entry msi_vec[NIU_NUM_LDG];
8231 struct niu_parent *parent = np->parent;
8232 struct pci_dev *pdev = np->pdev;
8233 int i, num_irqs, err;
8234 u8 first_ldg;
8236 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
8237 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
8238 ldg_num_map[i] = first_ldg + i;
8240 num_irqs = (parent->rxchan_per_port[np->port] +
8241 parent->txchan_per_port[np->port] +
8242 (np->port == 0 ? 3 : 1));
8243 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
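/* pci_enable_msix() returns a positive value when fewer vectors are
 * available than requested; in that case it is the number that could be
 * allocated, so the retry below re-requests with that smaller count.  A
 * negative return disables MSI-X for this port entirely.
 */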
8245 retry:
8246 for (i = 0; i < num_irqs; i++) {
8247 msi_vec[i].vector = 0;
8248 msi_vec[i].entry = i;
8251 err = pci_enable_msix(pdev, msi_vec, num_irqs);
8252 if (err < 0) {
8253 np->flags &= ~NIU_FLAGS_MSIX;
8254 return;
8256 if (err > 0) {
8257 num_irqs = err;
8258 goto retry;
8261 np->flags |= NIU_FLAGS_MSIX;
8262 for (i = 0; i < num_irqs; i++)
8263 np->ldg[i].irq = msi_vec[i].vector;
8264 np->num_ldg = num_irqs;
8267 static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
8269 #ifdef CONFIG_SPARC64
8270 struct of_device *op = np->op;
8271 const u32 *int_prop;
8272 int i;
8274 int_prop = of_get_property(op->node, "interrupts", NULL);
8275 if (!int_prop)
8276 return -ENODEV;
8278 for (i = 0; i < op->num_irqs; i++) {
8279 ldg_num_map[i] = int_prop[i];
8280 np->ldg[i].irq = op->irqs[i];
8283 np->num_ldg = op->num_irqs;
8285 return 0;
8286 #else
8287 return -EINVAL;
8288 #endif
8291 static int __devinit niu_ldg_init(struct niu *np)
8293 struct niu_parent *parent = np->parent;
8294 u8 ldg_num_map[NIU_NUM_LDG];
8295 int first_chan, num_chan;
8296 int i, err, ldg_rotor;
8297 u8 port;
8299 np->num_ldg = 1;
8300 np->ldg[0].irq = np->dev->irq;
8301 if (parent->plat_type == PLAT_TYPE_NIU) {
8302 err = niu_n2_irq_init(np, ldg_num_map);
8303 if (err)
8304 return err;
8305 } else
8306 niu_try_msix(np, ldg_num_map);
8308 port = np->port;
8309 for (i = 0; i < np->num_ldg; i++) {
8310 struct niu_ldg *lp = &np->ldg[i];
8312 netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
8314 lp->np = np;
8315 lp->ldg_num = ldg_num_map[i];
8316 lp->timer = 2; /* XXX */
8318 /* On N2 NIU the firmware has set up the SID mappings so they go
8319 * to the correct values that will route the LDG to the proper
8320 * interrupt in the NCU interrupt table.
8322 if (np->parent->plat_type != PLAT_TYPE_NIU) {
8323 err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
8324 if (err)
8325 return err;
8329 /* We adopt the LDG assignment ordering used by the N2 NIU
8330 * 'interrupt' properties because that simplifies a lot of
8331 * things. This ordering is:
8333 * MAC
8334 * MIF (if port zero)
8335 * SYSERR (if port zero)
8336 * RX channels
8337 * TX channels
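/* A small illustration (hypothetical counts): with np->num_ldg == 4 on
 * port 0, the rotor below gives MAC, MIF and SYSERR the first three LDGs
 * and then spreads the RX and TX DMA channel LDNs round-robin across all
 * four, wrapping whenever ldg_rotor reaches np->num_ldg.
 */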
8340 ldg_rotor = 0;
8342 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
8343 LDN_MAC(port));
8344 if (err)
8345 return err;
8347 ldg_rotor++;
8348 if (ldg_rotor == np->num_ldg)
8349 ldg_rotor = 0;
8351 if (port == 0) {
8352 err = niu_ldg_assign_ldn(np, parent,
8353 ldg_num_map[ldg_rotor],
8354 LDN_MIF);
8355 if (err)
8356 return err;
8358 ldg_rotor++;
8359 if (ldg_rotor == np->num_ldg)
8360 ldg_rotor = 0;
8362 err = niu_ldg_assign_ldn(np, parent,
8363 ldg_num_map[ldg_rotor],
8364 LDN_DEVICE_ERROR);
8365 if (err)
8366 return err;
8368 ldg_rotor++;
8369 if (ldg_rotor == np->num_ldg)
8370 ldg_rotor = 0;
8374 first_chan = 0;
8375 for (i = 0; i < port; i++)
8376 first_chan += parent->rxchan_per_port[i];
8377 num_chan = parent->rxchan_per_port[port];
8379 for (i = first_chan; i < (first_chan + num_chan); i++) {
8380 err = niu_ldg_assign_ldn(np, parent,
8381 ldg_num_map[ldg_rotor],
8382 LDN_RXDMA(i));
8383 if (err)
8384 return err;
8385 ldg_rotor++;
8386 if (ldg_rotor == np->num_ldg)
8387 ldg_rotor = 0;
8390 first_chan = 0;
8391 for (i = 0; i < port; i++)
8392 first_chan += parent->txchan_per_port[i];
8393 num_chan = parent->txchan_per_port[port];
8394 for (i = first_chan; i < (first_chan + num_chan); i++) {
8395 err = niu_ldg_assign_ldn(np, parent,
8396 ldg_num_map[ldg_rotor],
8397 LDN_TXDMA(i));
8398 if (err)
8399 return err;
8400 ldg_rotor++;
8401 if (ldg_rotor == np->num_ldg)
8402 ldg_rotor = 0;
8405 return 0;
8408 static void __devexit niu_ldg_free(struct niu *np)
8410 if (np->flags & NIU_FLAGS_MSIX)
8411 pci_disable_msix(np->pdev);
8414 static int __devinit niu_get_of_props(struct niu *np)
8416 #ifdef CONFIG_SPARC64
8417 struct net_device *dev = np->dev;
8418 struct device_node *dp;
8419 const char *phy_type;
8420 const u8 *mac_addr;
8421 const char *model;
8422 int prop_len;
8424 if (np->parent->plat_type == PLAT_TYPE_NIU)
8425 dp = np->op->node;
8426 else
8427 dp = pci_device_to_OF_node(np->pdev);
8429 phy_type = of_get_property(dp, "phy-type", &prop_len);
8430 if (!phy_type) {
8431 dev_err(np->device, PFX "%s: OF node lacks "
8432 "phy-type property\n",
8433 dp->full_name);
8434 return -EINVAL;
8437 if (!strcmp(phy_type, "none"))
8438 return -ENODEV;
8440 strcpy(np->vpd.phy_type, phy_type);
8442 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
8443 dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
8444 dp->full_name, np->vpd.phy_type);
8445 return -EINVAL;
8448 mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
8449 if (!mac_addr) {
8450 dev_err(np->device, PFX "%s: OF node lacks "
8451 "local-mac-address property\n",
8452 dp->full_name);
8453 return -EINVAL;
8455 if (prop_len != dev->addr_len) {
8456 dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
8457 "is wrong.\n",
8458 dp->full_name, prop_len);
8460 memcpy(dev->perm_addr, mac_addr, dev->addr_len);
8461 if (!is_valid_ether_addr(&dev->perm_addr[0])) {
8462 int i;
8464 dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
8465 dp->full_name);
8466 dev_err(np->device, PFX "%s: [ \n",
8467 dp->full_name);
8468 for (i = 0; i < 6; i++)
8469 printk("%02x ", dev->perm_addr[i]);
8470 printk("]\n");
8471 return -EINVAL;
8474 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
8476 model = of_get_property(dp, "model", &prop_len);
8478 if (model)
8479 strcpy(np->vpd.model, model);
8481 return 0;
8482 #else
8483 return -EINVAL;
8484 #endif
8487 static int __devinit niu_get_invariants(struct niu *np)
8489 int err, have_props;
8490 u32 offset;
8492 err = niu_get_of_props(np);
8493 if (err == -ENODEV)
8494 return err;
8496 have_props = !err;
8498 err = niu_init_mac_ipp_pcs_base(np);
8499 if (err)
8500 return err;
8502 if (have_props) {
8503 err = niu_get_and_validate_port(np);
8504 if (err)
8505 return err;
8507 } else {
8508 if (np->parent->plat_type == PLAT_TYPE_NIU)
8509 return -EINVAL;
8511 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
8512 offset = niu_pci_vpd_offset(np);
8513 niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
8514 offset);
8515 if (offset)
8516 niu_pci_vpd_fetch(np, offset);
8517 nw64(ESPC_PIO_EN, 0);
8519 if (np->flags & NIU_FLAGS_VPD_VALID) {
8520 niu_pci_vpd_validate(np);
8521 err = niu_get_and_validate_port(np);
8522 if (err)
8523 return err;
8526 if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
8527 err = niu_get_and_validate_port(np);
8528 if (err)
8529 return err;
8530 err = niu_pci_probe_sprom(np);
8531 if (err)
8532 return err;
8536 err = niu_probe_ports(np);
8537 if (err)
8538 return err;
8540 niu_ldg_init(np);
8542 niu_classifier_swstate_init(np);
8543 niu_link_config_init(np);
8545 err = niu_determine_phy_disposition(np);
8546 if (!err)
8547 err = niu_init_link(np);
8549 return err;
8552 static LIST_HEAD(niu_parent_list);
8553 static DEFINE_MUTEX(niu_parent_lock);
8554 static int niu_parent_index;
8556 static ssize_t show_port_phy(struct device *dev,
8557 struct device_attribute *attr, char *buf)
8559 struct platform_device *plat_dev = to_platform_device(dev);
8560 struct niu_parent *p = plat_dev->dev.platform_data;
8561 u32 port_phy = p->port_phy;
8562 char *orig_buf = buf;
8563 int i;
8565 if (port_phy == PORT_PHY_UNKNOWN ||
8566 port_phy == PORT_PHY_INVALID)
8567 return 0;
8569 for (i = 0; i < p->num_ports; i++) {
8570 const char *type_str;
8571 int type;
8573 type = phy_decode(port_phy, i);
8574 if (type == PORT_TYPE_10G)
8575 type_str = "10G";
8576 else
8577 type_str = "1G";
8578 buf += sprintf(buf,
8579 (i == 0) ? "%s" : " %s",
8580 type_str);
8582 buf += sprintf(buf, "\n");
8583 return buf - orig_buf;
8586 static ssize_t show_plat_type(struct device *dev,
8587 struct device_attribute *attr, char *buf)
8589 struct platform_device *plat_dev = to_platform_device(dev);
8590 struct niu_parent *p = plat_dev->dev.platform_data;
8591 const char *type_str;
8593 switch (p->plat_type) {
8594 case PLAT_TYPE_ATLAS:
8595 type_str = "atlas";
8596 break;
8597 case PLAT_TYPE_NIU:
8598 type_str = "niu";
8599 break;
8600 case PLAT_TYPE_VF_P0:
8601 type_str = "vf_p0";
8602 break;
8603 case PLAT_TYPE_VF_P1:
8604 type_str = "vf_p1";
8605 break;
8606 default:
8607 type_str = "unknown";
8608 break;
8611 return sprintf(buf, "%s\n", type_str);
8614 static ssize_t __show_chan_per_port(struct device *dev,
8615 struct device_attribute *attr, char *buf,
8616 int rx)
8618 struct platform_device *plat_dev = to_platform_device(dev);
8619 struct niu_parent *p = plat_dev->dev.platform_data;
8620 char *orig_buf = buf;
8621 u8 *arr;
8622 int i;
8624 arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
8626 for (i = 0; i < p->num_ports; i++) {
8627 buf += sprintf(buf,
8628 (i == 0) ? "%d" : " %d",
8629 arr[i]);
8631 buf += sprintf(buf, "\n");
8633 return buf - orig_buf;
8636 static ssize_t show_rxchan_per_port(struct device *dev,
8637 struct device_attribute *attr, char *buf)
8639 return __show_chan_per_port(dev, attr, buf, 1);
8642 static ssize_t show_txchan_per_port(struct device *dev,
8643 struct device_attribute *attr, char *buf)
8645 return __show_chan_per_port(dev, attr, buf, 0);
8648 static ssize_t show_num_ports(struct device *dev,
8649 struct device_attribute *attr, char *buf)
8651 struct platform_device *plat_dev = to_platform_device(dev);
8652 struct niu_parent *p = plat_dev->dev.platform_data;
8654 return sprintf(buf, "%d\n", p->num_ports);
8657 static struct device_attribute niu_parent_attributes[] = {
8658 __ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
8659 __ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
8660 __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
8661 __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
8662 __ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
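/* Usage note (paths illustrative): the parent registers as platform device
 * "niu.<index>", so e.g. "cat /sys/devices/platform/niu.0/port_phy" would
 * print something like "10G 10G 1G 1G" for a mixed four-port board.
 */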
8666 static struct niu_parent * __devinit niu_new_parent(struct niu *np,
8667 union niu_parent_id *id,
8668 u8 ptype)
8670 struct platform_device *plat_dev;
8671 struct niu_parent *p;
8672 int i;
8674 niudbg(PROBE, "niu_new_parent: Creating new parent.\n");
8676 plat_dev = platform_device_register_simple("niu", niu_parent_index,
8677 NULL, 0);
8678 if (!plat_dev)
8679 return NULL;
8681 for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
8682 int err = device_create_file(&plat_dev->dev,
8683 &niu_parent_attributes[i]);
8684 if (err)
8685 goto fail_unregister;
8688 p = kzalloc(sizeof(*p), GFP_KERNEL);
8689 if (!p)
8690 goto fail_unregister;
8692 p->index = niu_parent_index++;
8694 plat_dev->dev.platform_data = p;
8695 p->plat_dev = plat_dev;
8697 memcpy(&p->id, id, sizeof(*id));
8698 p->plat_type = ptype;
8699 INIT_LIST_HEAD(&p->list);
8700 atomic_set(&p->refcnt, 0);
8701 list_add(&p->list, &niu_parent_list);
8702 spin_lock_init(&p->lock);
8704 p->rxdma_clock_divider = 7500;
8706 p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
8707 if (p->plat_type == PLAT_TYPE_NIU)
8708 p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
8710 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
8711 int index = i - CLASS_CODE_USER_PROG1;
8713 p->tcam_key[index] = TCAM_KEY_TSEL;
8714 p->flow_key[index] = (FLOW_KEY_IPSA |
8715 FLOW_KEY_IPDA |
8716 FLOW_KEY_PROTO |
8717 (FLOW_KEY_L4_BYTE12 <<
8718 FLOW_KEY_L4_0_SHIFT) |
8719 (FLOW_KEY_L4_BYTE12 <<
8720 FLOW_KEY_L4_1_SHIFT));
8723 for (i = 0; i < LDN_MAX + 1; i++)
8724 p->ldg_map[i] = LDG_INVALID;
8726 return p;
8728 fail_unregister:
8729 platform_device_unregister(plat_dev);
8730 return NULL;
8733 static struct niu_parent * __devinit niu_get_parent(struct niu *np,
8734 union niu_parent_id *id,
8735 u8 ptype)
8737 struct niu_parent *p, *tmp;
8738 int port = np->port;
8740 niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
8741 ptype, port);
8743 mutex_lock(&niu_parent_lock);
8744 p = NULL;
8745 list_for_each_entry(tmp, &niu_parent_list, list) {
8746 if (!memcmp(id, &tmp->id, sizeof(*id))) {
8747 p = tmp;
8748 break;
8751 if (!p)
8752 p = niu_new_parent(np, id, ptype);
8754 if (p) {
8755 char port_name[6];
8756 int err;
8758 sprintf(port_name, "port%d", port);
8759 err = sysfs_create_link(&p->plat_dev->dev.kobj,
8760 &np->device->kobj,
8761 port_name);
8762 if (!err) {
8763 p->ports[port] = np;
8764 atomic_inc(&p->refcnt);
8767 mutex_unlock(&niu_parent_lock);
8769 return p;
8772 static void niu_put_parent(struct niu *np)
8774 struct niu_parent *p = np->parent;
8775 u8 port = np->port;
8776 char port_name[6];
8778 BUG_ON(!p || p->ports[port] != np);
8780 niudbg(PROBE, "niu_put_parent: port[%u]\n", port);
8782 sprintf(port_name, "port%d", port);
8784 mutex_lock(&niu_parent_lock);
8786 sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
8788 p->ports[port] = NULL;
8789 np->parent = NULL;
8791 if (atomic_dec_and_test(&p->refcnt)) {
8792 list_del(&p->list);
8793 platform_device_unregister(p->plat_dev);
8796 mutex_unlock(&niu_parent_lock);
8799 static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
8800 u64 *handle, gfp_t flag)
8802 dma_addr_t dh;
8803 void *ret;
8805 ret = dma_alloc_coherent(dev, size, &dh, flag);
8806 if (ret)
8807 *handle = dh;
8808 return ret;
8811 static void niu_pci_free_coherent(struct device *dev, size_t size,
8812 void *cpu_addr, u64 handle)
8814 dma_free_coherent(dev, size, cpu_addr, handle);
8817 static u64 niu_pci_map_page(struct device *dev, struct page *page,
8818 unsigned long offset, size_t size,
8819 enum dma_data_direction direction)
8821 return dma_map_page(dev, page, offset, size, direction);
8824 static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
8825 size_t size, enum dma_data_direction direction)
8827 dma_unmap_page(dev, dma_address, size, direction);
8830 static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
8831 size_t size,
8832 enum dma_data_direction direction)
8834 return dma_map_single(dev, cpu_addr, size, direction);
8837 static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
8838 size_t size,
8839 enum dma_data_direction direction)
8841 dma_unmap_single(dev, dma_address, size, direction);
8844 static const struct niu_ops niu_pci_ops = {
8845 .alloc_coherent = niu_pci_alloc_coherent,
8846 .free_coherent = niu_pci_free_coherent,
8847 .map_page = niu_pci_map_page,
8848 .unmap_page = niu_pci_unmap_page,
8849 .map_single = niu_pci_map_single,
8850 .unmap_single = niu_pci_unmap_single,
8853 static void __devinit niu_driver_version(void)
8855 static int niu_version_printed;
8857 if (niu_version_printed++ == 0)
8858 pr_info("%s", version);
8861 static struct net_device * __devinit niu_alloc_and_init(
8862 struct device *gen_dev, struct pci_dev *pdev,
8863 struct of_device *op, const struct niu_ops *ops,
8864 u8 port)
8866 struct net_device *dev;
8867 struct niu *np;
8869 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
8870 if (!dev) {
8871 dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
8872 return NULL;
8875 SET_NETDEV_DEV(dev, gen_dev);
8877 np = netdev_priv(dev);
8878 np->dev = dev;
8879 np->pdev = pdev;
8880 np->op = op;
8881 np->device = gen_dev;
8882 np->ops = ops;
8884 np->msg_enable = niu_debug;
8886 spin_lock_init(&np->lock);
8887 INIT_WORK(&np->reset_task, niu_reset_task);
8889 np->port = port;
8891 return dev;
8894 static void __devinit niu_assign_netdev_ops(struct net_device *dev)
8896 dev->open = niu_open;
8897 dev->stop = niu_close;
8898 dev->get_stats = niu_get_stats;
8899 dev->set_multicast_list = niu_set_rx_mode;
8900 dev->set_mac_address = niu_set_mac_addr;
8901 dev->do_ioctl = niu_ioctl;
8902 dev->tx_timeout = niu_tx_timeout;
8903 dev->hard_start_xmit = niu_start_xmit;
8904 dev->ethtool_ops = &niu_ethtool_ops;
8905 dev->watchdog_timeo = NIU_TX_TIMEOUT;
8906 dev->change_mtu = niu_change_mtu;
8909 static void __devinit niu_device_announce(struct niu *np)
8911 struct net_device *dev = np->dev;
8912 DECLARE_MAC_BUF(mac);
8914 pr_info("%s: NIU Ethernet %s\n",
8915 dev->name, print_mac(mac, dev->dev_addr));
8917 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
8918 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
8919 dev->name,
8920 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
8921 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
8922 (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
8923 (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
8924 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
8925 np->vpd.phy_type);
8926 } else {
8927 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
8928 dev->name,
8929 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
8930 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
8931 (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
8932 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
8933 "COPPER")),
8934 (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
8935 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
8936 np->vpd.phy_type);
8940 static int __devinit niu_pci_init_one(struct pci_dev *pdev,
8941 const struct pci_device_id *ent)
8943 union niu_parent_id parent_id;
8944 struct net_device *dev;
8945 struct niu *np;
8946 int err, pos;
8947 u64 dma_mask;
8948 u16 val16;
8950 niu_driver_version();
8952 err = pci_enable_device(pdev);
8953 if (err) {
8954 dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
8955 "aborting.\n");
8956 return err;
8959 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
8960 !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8961 dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
8962 "base addresses, aborting.\n");
8963 err = -ENODEV;
8964 goto err_out_disable_pdev;
8967 err = pci_request_regions(pdev, DRV_MODULE_NAME);
8968 if (err) {
8969 dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
8970 "aborting.\n");
8971 goto err_out_disable_pdev;
8974 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8975 if (pos <= 0) {
8976 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
8977 "aborting.\n");
8978 goto err_out_free_res;
8981 dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
8982 &niu_pci_ops, PCI_FUNC(pdev->devfn));
8983 if (!dev) {
8984 err = -ENOMEM;
8985 goto err_out_free_res;
8987 np = netdev_priv(dev);
8989 memset(&parent_id, 0, sizeof(parent_id));
8990 parent_id.pci.domain = pci_domain_nr(pdev->bus);
8991 parent_id.pci.bus = pdev->bus->number;
8992 parent_id.pci.device = PCI_SLOT(pdev->devfn);
8994 np->parent = niu_get_parent(np, &parent_id,
8995 PLAT_TYPE_ATLAS);
8996 if (!np->parent) {
8997 err = -ENOMEM;
8998 goto err_out_free_dev;
9001 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
9002 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
9003 val16 |= (PCI_EXP_DEVCTL_CERE |
9004 PCI_EXP_DEVCTL_NFERE |
9005 PCI_EXP_DEVCTL_FERE |
9006 PCI_EXP_DEVCTL_URRE |
9007 PCI_EXP_DEVCTL_RELAX_EN);
9008 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
9010 dma_mask = DMA_44BIT_MASK;
9011 err = pci_set_dma_mask(pdev, dma_mask);
9012 if (!err) {
9013 dev->features |= NETIF_F_HIGHDMA;
9014 err = pci_set_consistent_dma_mask(pdev, dma_mask);
9015 if (err) {
9016 dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
9017 "DMA for consistent allocations, "
9018 "aborting.\n");
9019 goto err_out_release_parent;
9022 if (err || dma_mask == DMA_32BIT_MASK) {
9023 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
9024 if (err) {
9025 dev_err(&pdev->dev, PFX "No usable DMA configuration, "
9026 "aborting.\n");
9027 goto err_out_release_parent;
9031 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
9033 np->regs = pci_ioremap_bar(pdev, 0);
9034 if (!np->regs) {
9035 dev_err(&pdev->dev, PFX "Cannot map device registers, "
9036 "aborting.\n");
9037 err = -ENOMEM;
9038 goto err_out_release_parent;
9041 pci_set_master(pdev);
9042 pci_save_state(pdev);
9044 dev->irq = pdev->irq;
9046 niu_assign_netdev_ops(dev);
9048 err = niu_get_invariants(np);
9049 if (err) {
9050 if (err != -ENODEV)
9051 dev_err(&pdev->dev, PFX "Problem fetching invariants "
9052 "of chip, aborting.\n");
9053 goto err_out_iounmap;
9056 err = register_netdev(dev);
9057 if (err) {
9058 dev_err(&pdev->dev, PFX "Cannot register net device, "
9059 "aborting.\n");
9060 goto err_out_iounmap;
9063 pci_set_drvdata(pdev, dev);
9065 niu_device_announce(np);
9067 return 0;
9069 err_out_iounmap:
9070 if (np->regs) {
9071 iounmap(np->regs);
9072 np->regs = NULL;
9075 err_out_release_parent:
9076 niu_put_parent(np);
9078 err_out_free_dev:
9079 free_netdev(dev);
9081 err_out_free_res:
9082 pci_release_regions(pdev);
9084 err_out_disable_pdev:
9085 pci_disable_device(pdev);
9086 pci_set_drvdata(pdev, NULL);
9088 return err;
9091 static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
9093 struct net_device *dev = pci_get_drvdata(pdev);
9095 if (dev) {
9096 struct niu *np = netdev_priv(dev);
9098 unregister_netdev(dev);
9099 if (np->regs) {
9100 iounmap(np->regs);
9101 np->regs = NULL;
9104 niu_ldg_free(np);
9106 niu_put_parent(np);
9108 free_netdev(dev);
9109 pci_release_regions(pdev);
9110 pci_disable_device(pdev);
9111 pci_set_drvdata(pdev, NULL);
9115 static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
9117 struct net_device *dev = pci_get_drvdata(pdev);
9118 struct niu *np = netdev_priv(dev);
9119 unsigned long flags;
9121 if (!netif_running(dev))
9122 return 0;
9124 flush_scheduled_work();
9125 niu_netif_stop(np);
9127 del_timer_sync(&np->timer);
9129 spin_lock_irqsave(&np->lock, flags);
9130 niu_enable_interrupts(np, 0);
9131 spin_unlock_irqrestore(&np->lock, flags);
9133 netif_device_detach(dev);
9135 spin_lock_irqsave(&np->lock, flags);
9136 niu_stop_hw(np);
9137 spin_unlock_irqrestore(&np->lock, flags);
9139 pci_save_state(pdev);
9141 return 0;
9144 static int niu_resume(struct pci_dev *pdev)
9146 struct net_device *dev = pci_get_drvdata(pdev);
9147 struct niu *np = netdev_priv(dev);
9148 unsigned long flags;
9149 int err;
9151 if (!netif_running(dev))
9152 return 0;
9154 pci_restore_state(pdev);
9156 netif_device_attach(dev);
9158 spin_lock_irqsave(&np->lock, flags);
9160 err = niu_init_hw(np);
9161 if (!err) {
9162 np->timer.expires = jiffies + HZ;
9163 add_timer(&np->timer);
9164 niu_netif_start(np);
9167 spin_unlock_irqrestore(&np->lock, flags);
9169 return err;
9172 static struct pci_driver niu_pci_driver = {
9173 .name = DRV_MODULE_NAME,
9174 .id_table = niu_pci_tbl,
9175 .probe = niu_pci_init_one,
9176 .remove = __devexit_p(niu_pci_remove_one),
9177 .suspend = niu_suspend,
9178 .resume = niu_resume,
9181 #ifdef CONFIG_SPARC64
9182 static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
9183 u64 *dma_addr, gfp_t flag)
9185 unsigned long order = get_order(size);
9186 unsigned long page = __get_free_pages(flag, order);
9188 if (page == 0UL)
9189 return NULL;
9190 memset((char *)page, 0, PAGE_SIZE << order);
9191 *dma_addr = __pa(page);
9193 return (void *) page;
9196 static void niu_phys_free_coherent(struct device *dev, size_t size,
9197 void *cpu_addr, u64 handle)
9199 unsigned long order = get_order(size);
9201 free_pages((unsigned long) cpu_addr, order);
9204 static u64 niu_phys_map_page(struct device *dev, struct page *page,
9205 unsigned long offset, size_t size,
9206 enum dma_data_direction direction)
9208 return page_to_phys(page) + offset;
9211 static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
9212 size_t size, enum dma_data_direction direction)
9214 /* Nothing to do. */
9217 static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
9218 size_t size,
9219 enum dma_data_direction direction)
9221 return __pa(cpu_addr);
9224 static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
9225 size_t size,
9226 enum dma_data_direction direction)
9228 /* Nothing to do. */
9231 static const struct niu_ops niu_phys_ops = {
9232 .alloc_coherent = niu_phys_alloc_coherent,
9233 .free_coherent = niu_phys_free_coherent,
9234 .map_page = niu_phys_map_page,
9235 .unmap_page = niu_phys_unmap_page,
9236 .map_single = niu_phys_map_single,
9237 .unmap_single = niu_phys_unmap_single,
9240 static unsigned long res_size(struct resource *r)
9242 return r->end - r->start + 1UL;
9245 static int __devinit niu_of_probe(struct of_device *op,
9246 const struct of_device_id *match)
9248 union niu_parent_id parent_id;
9249 struct net_device *dev;
9250 struct niu *np;
9251 const u32 *reg;
9252 int err;
9254 niu_driver_version();
9256 reg = of_get_property(op->node, "reg", NULL);
9257 if (!reg) {
9258 dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
9259 op->node->full_name);
9260 return -ENODEV;
9263 dev = niu_alloc_and_init(&op->dev, NULL, op,
9264 &niu_phys_ops, reg[0] & 0x1);
9265 if (!dev) {
9266 err = -ENOMEM;
9267 goto err_out;
9269 np = netdev_priv(dev);
9271 memset(&parent_id, 0, sizeof(parent_id));
9272 parent_id.of = of_get_parent(op->node);
9274 np->parent = niu_get_parent(np, &parent_id,
9275 PLAT_TYPE_NIU);
9276 if (!np->parent) {
9277 err = -ENOMEM;
9278 goto err_out_free_dev;
9281 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
9283 np->regs = of_ioremap(&op->resource[1], 0,
9284 res_size(&op->resource[1]),
9285 "niu regs");
9286 if (!np->regs) {
9287 dev_err(&op->dev, PFX "Cannot map device registers, "
9288 "aborting.\n");
9289 err = -ENOMEM;
9290 goto err_out_release_parent;
9293 np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
9294 res_size(&op->resource[2]),
9295 "niu vregs-1");
9296 if (!np->vir_regs_1) {
9297 dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
9298 "aborting.\n");
9299 err = -ENOMEM;
9300 goto err_out_iounmap;
9303 np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
9304 res_size(&op->resource[3]),
9305 "niu vregs-2");
9306 if (!np->vir_regs_2) {
9307 dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
9308 "aborting.\n");
9309 err = -ENOMEM;
9310 goto err_out_iounmap;
9313 niu_assign_netdev_ops(dev);
9315 err = niu_get_invariants(np);
9316 if (err) {
9317 if (err != -ENODEV)
9318 dev_err(&op->dev, PFX "Problem fetching invariants "
9319 "of chip, aborting.\n");
9320 goto err_out_iounmap;
9323 err = register_netdev(dev);
9324 if (err) {
9325 dev_err(&op->dev, PFX "Cannot register net device, "
9326 "aborting.\n");
9327 goto err_out_iounmap;
9330 dev_set_drvdata(&op->dev, dev);
9332 niu_device_announce(np);
9334 return 0;
9336 err_out_iounmap:
9337 if (np->vir_regs_1) {
9338 of_iounmap(&op->resource[2], np->vir_regs_1,
9339 res_size(&op->resource[2]));
9340 np->vir_regs_1 = NULL;
9343 if (np->vir_regs_2) {
9344 of_iounmap(&op->resource[3], np->vir_regs_2,
9345 res_size(&op->resource[3]));
9346 np->vir_regs_2 = NULL;
9349 if (np->regs) {
9350 of_iounmap(&op->resource[1], np->regs,
9351 res_size(&op->resource[1]));
9352 np->regs = NULL;
9355 err_out_release_parent:
9356 niu_put_parent(np);
9358 err_out_free_dev:
9359 free_netdev(dev);
9361 err_out:
9362 return err;
9365 static int __devexit niu_of_remove(struct of_device *op)
9367 struct net_device *dev = dev_get_drvdata(&op->dev);
9369 if (dev) {
9370 struct niu *np = netdev_priv(dev);
9372 unregister_netdev(dev);
9374 if (np->vir_regs_1) {
9375 of_iounmap(&op->resource[2], np->vir_regs_1,
9376 res_size(&op->resource[2]));
9377 np->vir_regs_1 = NULL;
9380 if (np->vir_regs_2) {
9381 of_iounmap(&op->resource[3], np->vir_regs_2,
9382 res_size(&op->resource[3]));
9383 np->vir_regs_2 = NULL;
9386 if (np->regs) {
9387 of_iounmap(&op->resource[1], np->regs,
9388 res_size(&op->resource[1]));
9389 np->regs = NULL;
9392 niu_ldg_free(np);
9394 niu_put_parent(np);
9396 free_netdev(dev);
9397 dev_set_drvdata(&op->dev, NULL);
9399 return 0;
9402 static const struct of_device_id niu_match[] = {
9404 .name = "network",
9405 .compatible = "SUNW,niusl",
9409 MODULE_DEVICE_TABLE(of, niu_match);
9411 static struct of_platform_driver niu_of_driver = {
9412 .name = "niu",
9413 .match_table = niu_match,
9414 .probe = niu_of_probe,
9415 .remove = __devexit_p(niu_of_remove),
9418 #endif /* CONFIG_SPARC64 */
9420 static int __init niu_init(void)
9422 int err = 0;
9424 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
9426 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
9428 #ifdef CONFIG_SPARC64
9429 err = of_register_driver(&niu_of_driver, &of_bus_type);
9430 #endif
9432 if (!err) {
9433 err = pci_register_driver(&niu_pci_driver);
9434 #ifdef CONFIG_SPARC64
9435 if (err)
9436 of_unregister_driver(&niu_of_driver);
9437 #endif
9440 return err;
9443 static void __exit niu_exit(void)
9445 pci_unregister_driver(&niu_pci_driver);
9446 #ifdef CONFIG_SPARC64
9447 of_unregister_driver(&niu_of_driver);
9448 #endif
9451 module_init(niu_init);
9452 module_exit(niu_exit);