net: dsa: mv88e6xxx: rework FDB getnext operation
linux-2.6/btrfs-unstable.git: drivers/net/dsa/mv88e6xxx.c (blob 0e588b9809d9cfbe246362ce8b25fe0c10a255f7)
1 /*
2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
11 #include <linux/debugfs.h>
12 #include <linux/delay.h>
13 #include <linux/etherdevice.h>
14 #include <linux/if_bridge.h>
15 #include <linux/jiffies.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/netdevice.h>
19 #include <linux/phy.h>
20 #include <linux/seq_file.h>
21 #include <net/dsa.h>
22 #include "mv88e6xxx.h"
24 /* MDIO bus access can be nested in the case of PHYs connected to the
25 * internal MDIO bus of the switch, which is accessed via MDIO bus of
26 * the Ethernet interface. Avoid lockdep false positives by using
27 * mutex_lock_nested().
28 */
29 static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
31 int ret;
33 mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
34 ret = bus->read(bus, addr, regnum);
35 mutex_unlock(&bus->mdio_lock);
37 return ret;
40 static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
41 u16 val)
43 int ret;
45 mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
46 ret = bus->write(bus, addr, regnum, val);
47 mutex_unlock(&bus->mdio_lock);
49 return ret;
52 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
53 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
54 * will be directly accessible on some {device address,register address}
55 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
56 * will only respond to SMI transactions to that specific address, and
57 * an indirect addressing mechanism needs to be used to access its
58 * registers.
59 */
60 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
62 int ret;
63 int i;
65 for (i = 0; i < 16; i++) {
66 ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
67 if (ret < 0)
68 return ret;
70 if ((ret & SMI_CMD_BUSY) == 0)
71 return 0;
74 return -ETIMEDOUT;
77 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
79 int ret;
81 if (sw_addr == 0)
82 return mv88e6xxx_mdiobus_read(bus, addr, reg);
84 /* Wait for the bus to become free. */
85 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
86 if (ret < 0)
87 return ret;
89 /* Transmit the read command. */
90 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
91 SMI_CMD_OP_22_READ | (addr << 5) | reg);
92 if (ret < 0)
93 return ret;
95 /* Wait for the read command to complete. */
96 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
97 if (ret < 0)
98 return ret;
100 /* Read the data. */
101 ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
102 if (ret < 0)
103 return ret;
105 return ret & 0xffff;
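/* Editor's note (illustrative sketch, not part of the original source):
 * with the switch strapped to a non-zero SMI address such as 0x10, an
 * indirect read of register 0x03 of device address 0x1b decomposes into
 *
 *	SMI_CMD  <- SMI_CMD_OP_22_READ | (0x1b << 5) | 0x03
 *	SMI_DATA -> 16-bit result
 *
 * i.e. the command and the data each go through the single strapped SMI
 * address, with SMI_CMD busy-polled before and after the command. The
 * device/register values above are arbitrary and only show the bit packing.
 */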
108 /* Must be called with SMI mutex held */
109 static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
111 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
112 int ret;
114 if (bus == NULL)
115 return -EINVAL;
117 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
118 if (ret < 0)
119 return ret;
121 dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
122 addr, reg, ret);
124 return ret;
127 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
129 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
130 int ret;
132 mutex_lock(&ps->smi_mutex);
133 ret = _mv88e6xxx_reg_read(ds, addr, reg);
134 mutex_unlock(&ps->smi_mutex);
136 return ret;
139 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
140 int reg, u16 val)
142 int ret;
144 if (sw_addr == 0)
145 return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
147 /* Wait for the bus to become free. */
148 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
149 if (ret < 0)
150 return ret;
152 /* Transmit the data to write. */
153 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
154 if (ret < 0)
155 return ret;
157 /* Transmit the write command. */
158 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
159 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
160 if (ret < 0)
161 return ret;
163 /* Wait for the write command to complete. */
164 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
165 if (ret < 0)
166 return ret;
168 return 0;
171 /* Must be called with SMI mutex held */
172 static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
173 u16 val)
175 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
177 if (bus == NULL)
178 return -EINVAL;
180 dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
181 addr, reg, val);
183 return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
186 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
188 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
189 int ret;
191 mutex_lock(&ps->smi_mutex);
192 ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
193 mutex_unlock(&ps->smi_mutex);
195 return ret;
198 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
200 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
201 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
202 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
204 return 0;
207 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
209 int i;
210 int ret;
212 for (i = 0; i < 6; i++) {
213 int j;
215 /* Write the MAC address byte. */
216 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
217 GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
219 /* Wait for the write to complete. */
220 for (j = 0; j < 16; j++) {
221 ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
222 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
223 break;
225 if (j == 16)
226 return -ETIMEDOUT;
229 return 0;
232 /* Must be called with SMI mutex held */
233 static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
235 if (addr >= 0)
236 return _mv88e6xxx_reg_read(ds, addr, regnum);
237 return 0xffff;
240 /* Must be called with SMI mutex held */
241 static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
242 u16 val)
244 if (addr >= 0)
245 return _mv88e6xxx_reg_write(ds, addr, regnum, val);
246 return 0;
249 #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
250 static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
252 int ret;
253 unsigned long timeout;
255 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
256 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
257 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
259 timeout = jiffies + 1 * HZ;
260 while (time_before(jiffies, timeout)) {
261 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
262 usleep_range(1000, 2000);
263 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
264 GLOBAL_STATUS_PPU_POLLING)
265 return 0;
268 return -ETIMEDOUT;
271 static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
273 int ret;
274 unsigned long timeout;
276 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
277 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
279 timeout = jiffies + 1 * HZ;
280 while (time_before(jiffies, timeout)) {
281 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
282 usleep_range(1000, 2000);
283 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
284 GLOBAL_STATUS_PPU_POLLING)
285 return 0;
288 return -ETIMEDOUT;
291 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
293 struct mv88e6xxx_priv_state *ps;
295 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
296 if (mutex_trylock(&ps->ppu_mutex)) {
297 struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
299 if (mv88e6xxx_ppu_enable(ds) == 0)
300 ps->ppu_disabled = 0;
301 mutex_unlock(&ps->ppu_mutex);
305 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
307 struct mv88e6xxx_priv_state *ps = (void *)_ps;
309 schedule_work(&ps->ppu_work);
312 static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
314 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
315 int ret;
317 mutex_lock(&ps->ppu_mutex);
319 /* If the PHY polling unit is enabled, disable it so that
320 * we can access the PHY registers. If it was already
321 * disabled, cancel the timer that is going to re-enable
322 * it.
323 */
324 if (!ps->ppu_disabled) {
325 ret = mv88e6xxx_ppu_disable(ds);
326 if (ret < 0) {
327 mutex_unlock(&ps->ppu_mutex);
328 return ret;
330 ps->ppu_disabled = 1;
331 } else {
332 del_timer(&ps->ppu_timer);
333 ret = 0;
336 return ret;
339 static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
341 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
343 /* Schedule a timer to re-enable the PHY polling unit. */
344 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
345 mutex_unlock(&ps->ppu_mutex);
348 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
350 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
352 mutex_init(&ps->ppu_mutex);
353 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
354 init_timer(&ps->ppu_timer);
355 ps->ppu_timer.data = (unsigned long)ps;
356 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
359 int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
361 int ret;
363 ret = mv88e6xxx_ppu_access_get(ds);
364 if (ret >= 0) {
365 ret = mv88e6xxx_reg_read(ds, addr, regnum);
366 mv88e6xxx_ppu_access_put(ds);
369 return ret;
372 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
373 int regnum, u16 val)
375 int ret;
377 ret = mv88e6xxx_ppu_access_get(ds);
378 if (ret >= 0) {
379 ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
380 mv88e6xxx_ppu_access_put(ds);
383 return ret;
385 #endif
387 void mv88e6xxx_poll_link(struct dsa_switch *ds)
389 int i;
391 for (i = 0; i < DSA_MAX_PORTS; i++) {
392 struct net_device *dev;
393 int uninitialized_var(port_status);
394 int link;
395 int speed;
396 int duplex;
397 int fc;
399 dev = ds->ports[i];
400 if (dev == NULL)
401 continue;
403 link = 0;
404 if (dev->flags & IFF_UP) {
405 port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
406 PORT_STATUS);
407 if (port_status < 0)
408 continue;
410 link = !!(port_status & PORT_STATUS_LINK);
413 if (!link) {
414 if (netif_carrier_ok(dev)) {
415 netdev_info(dev, "link down\n");
416 netif_carrier_off(dev);
418 continue;
421 switch (port_status & PORT_STATUS_SPEED_MASK) {
422 case PORT_STATUS_SPEED_10:
423 speed = 10;
424 break;
425 case PORT_STATUS_SPEED_100:
426 speed = 100;
427 break;
428 case PORT_STATUS_SPEED_1000:
429 speed = 1000;
430 break;
431 default:
432 speed = -1;
433 break;
435 duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
436 fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
438 if (!netif_carrier_ok(dev)) {
439 netdev_info(dev,
440 "link up, %d Mb/s, %s duplex, flow control %sabled\n",
441 speed,
442 duplex ? "full" : "half",
443 fc ? "en" : "dis");
444 netif_carrier_on(dev);
449 static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
451 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
453 switch (ps->id) {
454 case PORT_SWITCH_ID_6031:
455 case PORT_SWITCH_ID_6061:
456 case PORT_SWITCH_ID_6035:
457 case PORT_SWITCH_ID_6065:
458 return true;
460 return false;
463 static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
465 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
467 switch (ps->id) {
468 case PORT_SWITCH_ID_6092:
469 case PORT_SWITCH_ID_6095:
470 return true;
472 return false;
475 static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
477 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
479 switch (ps->id) {
480 case PORT_SWITCH_ID_6046:
481 case PORT_SWITCH_ID_6085:
482 case PORT_SWITCH_ID_6096:
483 case PORT_SWITCH_ID_6097:
484 return true;
486 return false;
489 static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
491 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
493 switch (ps->id) {
494 case PORT_SWITCH_ID_6123:
495 case PORT_SWITCH_ID_6161:
496 case PORT_SWITCH_ID_6165:
497 return true;
499 return false;
502 static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
504 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
506 switch (ps->id) {
507 case PORT_SWITCH_ID_6121:
508 case PORT_SWITCH_ID_6122:
509 case PORT_SWITCH_ID_6152:
510 case PORT_SWITCH_ID_6155:
511 case PORT_SWITCH_ID_6182:
512 case PORT_SWITCH_ID_6185:
513 case PORT_SWITCH_ID_6108:
514 case PORT_SWITCH_ID_6131:
515 return true;
517 return false;
520 static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
522 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
524 switch (ps->id) {
525 case PORT_SWITCH_ID_6320:
526 case PORT_SWITCH_ID_6321:
527 return true;
529 return false;
532 static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
534 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
536 switch (ps->id) {
537 case PORT_SWITCH_ID_6171:
538 case PORT_SWITCH_ID_6175:
539 case PORT_SWITCH_ID_6350:
540 case PORT_SWITCH_ID_6351:
541 return true;
543 return false;
546 static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
548 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
550 switch (ps->id) {
551 case PORT_SWITCH_ID_6172:
552 case PORT_SWITCH_ID_6176:
553 case PORT_SWITCH_ID_6240:
554 case PORT_SWITCH_ID_6352:
555 return true;
557 return false;
560 /* Must be called with SMI mutex held */
561 static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
563 int ret;
564 int i;
566 for (i = 0; i < 10; i++) {
567 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
568 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
569 return 0;
572 return -ETIMEDOUT;
575 /* Must be called with SMI mutex held */
576 static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
578 int ret;
580 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
581 port = (port + 1) << 5;
583 /* Snapshot the hardware statistics counters for this port. */
584 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
585 GLOBAL_STATS_OP_CAPTURE_PORT |
586 GLOBAL_STATS_OP_HIST_RX_TX | port);
587 if (ret < 0)
588 return ret;
590 /* Wait for the snapshotting to complete. */
591 ret = _mv88e6xxx_stats_wait(ds);
592 if (ret < 0)
593 return ret;
595 return 0;
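/* Editor's note (illustrative, not from the original source): on the
 * 6320/6352 families the port number is not used directly in the stats
 * operation; it is encoded as (port + 1) << 5 before being OR'd into
 * GLOBAL_STATS_OP, so e.g. port 0 becomes 0x20 and port 4 becomes 0xa0.
 */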
598 /* Must be called with SMI mutex held */
599 static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
601 u32 _val;
602 int ret;
604 *val = 0;
606 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
607 GLOBAL_STATS_OP_READ_CAPTURED |
608 GLOBAL_STATS_OP_HIST_RX_TX | stat);
609 if (ret < 0)
610 return;
612 ret = _mv88e6xxx_stats_wait(ds);
613 if (ret < 0)
614 return;
616 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
617 if (ret < 0)
618 return;
620 _val = ret << 16;
622 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
623 if (ret < 0)
624 return;
626 *val = _val | ret;
629 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
630 { "in_good_octets", 8, 0x00, },
631 { "in_bad_octets", 4, 0x02, },
632 { "in_unicast", 4, 0x04, },
633 { "in_broadcasts", 4, 0x06, },
634 { "in_multicasts", 4, 0x07, },
635 { "in_pause", 4, 0x16, },
636 { "in_undersize", 4, 0x18, },
637 { "in_fragments", 4, 0x19, },
638 { "in_oversize", 4, 0x1a, },
639 { "in_jabber", 4, 0x1b, },
640 { "in_rx_error", 4, 0x1c, },
641 { "in_fcs_error", 4, 0x1d, },
642 { "out_octets", 8, 0x0e, },
643 { "out_unicast", 4, 0x10, },
644 { "out_broadcasts", 4, 0x13, },
645 { "out_multicasts", 4, 0x12, },
646 { "out_pause", 4, 0x15, },
647 { "excessive", 4, 0x11, },
648 { "collisions", 4, 0x1e, },
649 { "deferred", 4, 0x05, },
650 { "single", 4, 0x14, },
651 { "multiple", 4, 0x17, },
652 { "out_fcs_error", 4, 0x03, },
653 { "late", 4, 0x1f, },
654 { "hist_64bytes", 4, 0x08, },
655 { "hist_65_127bytes", 4, 0x09, },
656 { "hist_128_255bytes", 4, 0x0a, },
657 { "hist_256_511bytes", 4, 0x0b, },
658 { "hist_512_1023bytes", 4, 0x0c, },
659 { "hist_1024_max_bytes", 4, 0x0d, },
660 /* Not all devices have the following counters */
661 { "sw_in_discards", 4, 0x110, },
662 { "sw_in_filtered", 2, 0x112, },
663 { "sw_out_filtered", 2, 0x113, },
667 static bool have_sw_in_discards(struct dsa_switch *ds)
669 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
671 switch (ps->id) {
672 case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
673 case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
674 case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
675 case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
676 case PORT_SWITCH_ID_6352:
677 return true;
678 default:
679 return false;
683 static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
684 int nr_stats,
685 struct mv88e6xxx_hw_stat *stats,
686 int port, uint8_t *data)
688 int i;
690 for (i = 0; i < nr_stats; i++) {
691 memcpy(data + i * ETH_GSTRING_LEN,
692 stats[i].string, ETH_GSTRING_LEN);
696 static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
697 int stat,
698 struct mv88e6xxx_hw_stat *stats,
699 int port)
701 struct mv88e6xxx_hw_stat *s = stats + stat;
702 u32 low;
703 u32 high = 0;
704 int ret;
705 u64 value;
707 if (s->reg >= 0x100) {
708 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
709 s->reg - 0x100);
710 if (ret < 0)
711 return UINT64_MAX;
713 low = ret;
714 if (s->sizeof_stat == 4) {
715 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
716 s->reg - 0x100 + 1);
717 if (ret < 0)
718 return UINT64_MAX;
719 high = ret;
721 } else {
722 _mv88e6xxx_stats_read(ds, s->reg, &low);
723 if (s->sizeof_stat == 8)
724 _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
726 value = (((u64)high) << 16) | low;
727 return value;
730 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
731 int nr_stats,
732 struct mv88e6xxx_hw_stat *stats,
733 int port, uint64_t *data)
735 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
736 int ret;
737 int i;
739 mutex_lock(&ps->smi_mutex);
741 ret = _mv88e6xxx_stats_snapshot(ds, port);
742 if (ret < 0) {
743 mutex_unlock(&ps->smi_mutex);
744 return;
747 /* Read each of the counters. */
748 for (i = 0; i < nr_stats; i++)
749 data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);
751 mutex_unlock(&ps->smi_mutex);
754 /* All the statistics in the table */
755 void
756 mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
758 if (have_sw_in_discards(ds))
759 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
760 mv88e6xxx_hw_stats, port, data);
761 else
762 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
763 mv88e6xxx_hw_stats, port, data);
766 int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
768 if (have_sw_in_discards(ds))
769 return ARRAY_SIZE(mv88e6xxx_hw_stats);
770 return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
773 void
774 mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
775 int port, uint64_t *data)
777 if (have_sw_in_discards(ds))
778 _mv88e6xxx_get_ethtool_stats(
779 ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
780 mv88e6xxx_hw_stats, port, data);
781 else
782 _mv88e6xxx_get_ethtool_stats(
783 ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
784 mv88e6xxx_hw_stats, port, data);
787 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
789 return 32 * sizeof(u16);
792 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
793 struct ethtool_regs *regs, void *_p)
795 u16 *p = _p;
796 int i;
798 regs->version = 0;
800 memset(p, 0xff, 32 * sizeof(u16));
802 for (i = 0; i < 32; i++) {
803 int ret;
805 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
806 if (ret >= 0)
807 p[i] = ret;
811 /* Must be called with SMI lock held */
812 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
813 u16 mask)
815 unsigned long timeout = jiffies + HZ / 10;
817 while (time_before(jiffies, timeout)) {
818 int ret;
820 ret = _mv88e6xxx_reg_read(ds, reg, offset);
821 if (ret < 0)
822 return ret;
823 if (!(ret & mask))
824 return 0;
826 usleep_range(1000, 2000);
828 return -ETIMEDOUT;
831 static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
833 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
834 int ret;
836 mutex_lock(&ps->smi_mutex);
837 ret = _mv88e6xxx_wait(ds, reg, offset, mask);
838 mutex_unlock(&ps->smi_mutex);
840 return ret;
843 static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
845 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
846 GLOBAL2_SMI_OP_BUSY);
849 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
851 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
852 GLOBAL2_EEPROM_OP_LOAD);
855 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
857 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
858 GLOBAL2_EEPROM_OP_BUSY);
861 /* Must be called with SMI lock held */
862 static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
864 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
865 GLOBAL_ATU_OP_BUSY);
868 /* Must be called with SMI lock held */
869 static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
871 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
872 GLOBAL2_SCRATCH_BUSY);
875 /* Must be called with SMI mutex held */
876 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
877 int regnum)
879 int ret;
881 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
882 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
883 regnum);
884 if (ret < 0)
885 return ret;
887 ret = _mv88e6xxx_phy_wait(ds);
888 if (ret < 0)
889 return ret;
891 return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
894 /* Must be called with SMI mutex held */
895 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
896 int regnum, u16 val)
898 int ret;
900 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
901 if (ret < 0)
902 return ret;
904 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
905 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
906 regnum);
908 return _mv88e6xxx_phy_wait(ds);
911 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
913 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
914 int reg;
916 mutex_lock(&ps->smi_mutex);
918 reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
919 if (reg < 0)
920 goto out;
922 e->eee_enabled = !!(reg & 0x0200);
923 e->tx_lpi_enabled = !!(reg & 0x0100);
925 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
926 if (reg < 0)
927 goto out;
929 e->eee_active = !!(reg & PORT_STATUS_EEE);
930 reg = 0;
932 out:
933 mutex_unlock(&ps->smi_mutex);
934 return reg;
937 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
938 struct phy_device *phydev, struct ethtool_eee *e)
940 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
941 int reg;
942 int ret;
944 mutex_lock(&ps->smi_mutex);
946 ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
947 if (ret < 0)
948 goto out;
950 reg = ret & ~0x0300;
951 if (e->eee_enabled)
952 reg |= 0x0200;
953 if (e->tx_lpi_enabled)
954 reg |= 0x0100;
956 ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
957 out:
958 mutex_unlock(&ps->smi_mutex);
960 return ret;
963 static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
965 int ret;
967 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
968 if (ret < 0)
969 return ret;
971 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
972 if (ret < 0)
973 return ret;
975 return _mv88e6xxx_atu_wait(ds);
978 static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
980 int ret;
982 ret = _mv88e6xxx_atu_wait(ds);
983 if (ret < 0)
984 return ret;
986 return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
989 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
991 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
992 int reg, ret = 0;
993 u8 oldstate;
995 mutex_lock(&ps->smi_mutex);
997 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
998 if (reg < 0) {
999 ret = reg;
1000 goto abort;
1003 oldstate = reg & PORT_CONTROL_STATE_MASK;
1004 if (oldstate != state) {
1005 /* Flush forwarding database if we're moving a port
1006 * from Learning or Forwarding state to Disabled or
1007 * Blocking or Listening state.
1008 */
1009 if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
1010 state <= PORT_CONTROL_STATE_BLOCKING) {
1011 ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
1012 if (ret)
1013 goto abort;
1015 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1016 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
1017 reg);
1020 abort:
1021 mutex_unlock(&ps->smi_mutex);
1022 return ret;
1025 /* Must be called with smi lock held */
1026 static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
1028 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1029 u8 fid = ps->fid[port];
1030 u16 reg = fid << 12;
1032 if (dsa_is_cpu_port(ds, port))
1033 reg |= ds->phys_port_mask;
1034 else
1035 reg |= (ps->bridge_mask[fid] |
1036 (1 << dsa_upstream_port(ds))) & ~(1 << port);
1038 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
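/* Editor's note (illustrative example, not from the original source):
 * the PORT_BASE_VLAN value built above packs the port's FID into bits
 * 15:12 and the allowed output-port mask into the low bits. For
 * example, a non-CPU port 1 with fid 3, bridge_mask[3] = 0x06 (ports 1
 * and 2) and upstream port 5 would be programmed with
 *
 *	(3 << 12) | ((0x06 | BIT(5)) & ~BIT(1)) = 0x3024
 */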
1041 /* Must be called with smi lock held */
1042 static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
1044 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1045 int port;
1046 u32 mask;
1047 int ret;
1049 mask = ds->phys_port_mask;
1050 while (mask) {
1051 port = __ffs(mask);
1052 mask &= ~(1 << port);
1053 if (ps->fid[port] != fid)
1054 continue;
1056 ret = _mv88e6xxx_update_port_config(ds, port);
1057 if (ret)
1058 return ret;
1061 return _mv88e6xxx_flush_fid(ds, fid);
1064 /* Bridge handling functions */
1066 int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1068 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1069 int ret = 0;
1070 u32 nmask;
1071 int fid;
1073 /* If the bridge group is not empty, join that group.
1074 * Otherwise create a new group.
1075 */
1076 fid = ps->fid[port];
1077 nmask = br_port_mask & ~(1 << port);
1078 if (nmask)
1079 fid = ps->fid[__ffs(nmask)];
1081 nmask = ps->bridge_mask[fid] | (1 << port);
1082 if (nmask != br_port_mask) {
1083 netdev_err(ds->ports[port],
1084 "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1085 fid, br_port_mask, nmask);
1086 return -EINVAL;
1089 mutex_lock(&ps->smi_mutex);
1091 ps->bridge_mask[fid] = br_port_mask;
1093 if (fid != ps->fid[port]) {
1094 clear_bit(ps->fid[port], ps->fid_bitmap);
1095 ps->fid[port] = fid;
1096 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1099 mutex_unlock(&ps->smi_mutex);
1101 return ret;
1104 int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1106 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1107 u8 fid, newfid;
1108 int ret;
1110 fid = ps->fid[port];
1112 if (ps->bridge_mask[fid] != br_port_mask) {
1113 netdev_err(ds->ports[port],
1114 "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1115 fid, br_port_mask, ps->bridge_mask[fid]);
1116 return -EINVAL;
1119 /* If the port was the last port of a bridge, we are done.
1120 * Otherwise assign a new fid to the port, and fix up
1121 * the bridge configuration.
1122 */
1123 if (br_port_mask == (1 << port))
1124 return 0;
1126 mutex_lock(&ps->smi_mutex);
1128 newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
1129 if (unlikely(newfid > ps->num_ports)) {
1130 netdev_err(ds->ports[port], "all first %d FIDs are used\n",
1131 ps->num_ports);
1132 ret = -ENOSPC;
1133 goto unlock;
1136 ps->fid[port] = newfid;
1137 set_bit(newfid, ps->fid_bitmap);
1138 ps->bridge_mask[fid] &= ~(1 << port);
1139 ps->bridge_mask[newfid] = 1 << port;
1141 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1142 if (!ret)
1143 ret = _mv88e6xxx_update_bridge_config(ds, newfid);
1145 unlock:
1146 mutex_unlock(&ps->smi_mutex);
1148 return ret;
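/* Editor's note (summary, not from the original source): FID assignment
 * is tracked in software (ps->fid and fid_bitmap) and programmed into
 * each port's PORT_BASE_VLAN register by _mv88e6xxx_update_port_config:
 * every port starts with its own FID (port + 1, see
 * mv88e6xxx_setup_port), joining a bridge makes the port adopt the FID
 * of an existing member, and a port that leaves gets the first free FID
 * from fid_bitmap before both the old and the new bridge configurations
 * are rewritten and flushed.
 */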
1151 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
1153 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1154 int stp_state;
1156 switch (state) {
1157 case BR_STATE_DISABLED:
1158 stp_state = PORT_CONTROL_STATE_DISABLED;
1159 break;
1160 case BR_STATE_BLOCKING:
1161 case BR_STATE_LISTENING:
1162 stp_state = PORT_CONTROL_STATE_BLOCKING;
1163 break;
1164 case BR_STATE_LEARNING:
1165 stp_state = PORT_CONTROL_STATE_LEARNING;
1166 break;
1167 case BR_STATE_FORWARDING:
1168 default:
1169 stp_state = PORT_CONTROL_STATE_FORWARDING;
1170 break;
1173 netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
1175 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1176 * so we can not update the port state directly but need to schedule it.
1177 */
1178 ps->port_state[port] = stp_state;
1179 set_bit(port, &ps->port_state_update_mask);
1180 schedule_work(&ps->bridge_work);
1182 return 0;
1185 static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
1186 const u8 addr[ETH_ALEN])
1188 int i, ret;
1190 for (i = 0; i < 3; i++) {
1191 ret = _mv88e6xxx_reg_write(
1192 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1193 (addr[i * 2] << 8) | addr[i * 2 + 1]);
1194 if (ret < 0)
1195 return ret;
1198 return 0;
1201 static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, u8 addr[ETH_ALEN])
1203 int i, ret;
1205 for (i = 0; i < 3; i++) {
1206 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1207 GLOBAL_ATU_MAC_01 + i);
1208 if (ret < 0)
1209 return ret;
1210 addr[i * 2] = ret >> 8;
1211 addr[i * 2 + 1] = ret & 0xff;
1214 return 0;
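/* Editor's note (illustrative, not from the original source): the ATU
 * MAC address is split big-endian across three 16-bit registers, so
 * writing 00:11:22:33:44:55 loads GLOBAL_ATU_MAC_01 with 0x0011,
 * GLOBAL_ATU_MAC_01 + 1 with 0x2233 and GLOBAL_ATU_MAC_01 + 2 with
 * 0x4455; the read path above reassembles the address the same way.
 */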
1217 static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
1218 const unsigned char *addr, int state)
1220 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1221 u8 fid = ps->fid[port];
1222 int ret;
1224 ret = _mv88e6xxx_atu_wait(ds);
1225 if (ret < 0)
1226 return ret;
1228 ret = _mv88e6xxx_atu_mac_write(ds, addr);
1229 if (ret < 0)
1230 return ret;
1232 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
1233 (0x10 << port) | state);
1234 if (ret)
1235 return ret;
1237 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
1239 return ret;
1242 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
1243 const unsigned char *addr, u16 vid)
1245 int state = is_multicast_ether_addr(addr) ?
1246 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1247 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1248 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1249 int ret;
1251 mutex_lock(&ps->smi_mutex);
1252 ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
1253 mutex_unlock(&ps->smi_mutex);
1255 return ret;
1258 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
1259 const unsigned char *addr, u16 vid)
1261 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1262 int ret;
1264 mutex_lock(&ps->smi_mutex);
1265 ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
1266 GLOBAL_ATU_DATA_STATE_UNUSED);
1267 mutex_unlock(&ps->smi_mutex);
1269 return ret;
1272 static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
1273 const u8 addr[ETH_ALEN],
1274 struct mv88e6xxx_atu_entry *entry)
1276 struct mv88e6xxx_atu_entry next = { 0 };
1277 int ret;
1279 next.fid = fid;
1281 ret = _mv88e6xxx_atu_wait(ds);
1282 if (ret < 0)
1283 return ret;
1285 ret = _mv88e6xxx_atu_mac_write(ds, addr);
1286 if (ret < 0)
1287 return ret;
1289 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
1290 if (ret < 0)
1291 return ret;
1293 ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
1294 if (ret < 0)
1295 return ret;
1297 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1298 if (ret < 0)
1299 return ret;
1301 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1302 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1303 unsigned int mask, shift;
1305 if (ret & GLOBAL_ATU_DATA_TRUNK) {
1306 next.trunk = true;
1307 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1308 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1309 } else {
1310 next.trunk = false;
1311 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1312 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1315 next.portv_trunkid = (ret & mask) >> shift;
1318 *entry = next;
1319 return 0;
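/* Editor's note (illustrative, not from the original source): for
 * non-trunk entries the portv_trunkid value decoded above is a bitmap
 * of member ports, so e.g. 0x09 means the entry is reachable through
 * ports 0 and 3; for trunk entries it holds the trunk ID instead. The
 * getnext code below uses this to skip entries that do not include the
 * port being queried.
 */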
1322 static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
1324 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1326 if (vid == 0)
1327 return ps->fid[port];
1329 return -ENOENT;
1332 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, u16 *vid,
1333 u8 addr[ETH_ALEN], bool *is_static)
1335 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1336 struct mv88e6xxx_atu_entry next;
1337 u16 fid;
1338 int ret;
1340 mutex_lock(&ps->smi_mutex);
1342 ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
1343 if (ret < 0)
1344 goto unlock;
1345 fid = ret;
1347 do {
1348 if (is_broadcast_ether_addr(addr)) {
1349 ret = -ENOENT;
1350 goto unlock;
1353 ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
1354 if (ret < 0)
1355 goto unlock;
1357 ether_addr_copy(addr, next.mac);
1359 if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
1360 continue;
1361 } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
1363 *is_static = next.state == (is_multicast_ether_addr(addr) ?
1364 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1365 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1366 unlock:
1367 mutex_unlock(&ps->smi_mutex);
1369 return ret;
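/* Editor's sketch (illustrative, not part of the original source): the
 * getnext operation above resumes from whatever MAC address the caller
 * passes in and rewrites it with the next entry found, so a port's
 * address database can be walked by looping until -ENOENT is returned
 * once the hardware wraps around to the broadcast address. Assuming the
 * caller seeds addr with zeros and vid with 0, a dump loop could look
 * roughly like this:
 *
 *	u8 addr[ETH_ALEN] = { 0 };
 *	u16 vid = 0;
 *	bool is_static;
 *
 *	while (mv88e6xxx_port_fdb_getnext(ds, port, &vid, addr,
 *					  &is_static) == 0)
 *		pr_info("%pM (%s)\n", addr,
 *			is_static ? "static" : "dynamic");
 */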
1372 static void mv88e6xxx_bridge_work(struct work_struct *work)
1374 struct mv88e6xxx_priv_state *ps;
1375 struct dsa_switch *ds;
1376 int port;
1378 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
1379 ds = ((struct dsa_switch *)ps) - 1;
1381 while (ps->port_state_update_mask) {
1382 port = __ffs(ps->port_state_update_mask);
1383 clear_bit(port, &ps->port_state_update_mask);
1384 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
1388 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
1390 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1391 int ret, fid;
1392 u16 reg;
1394 mutex_lock(&ps->smi_mutex);
1396 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1397 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1398 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
1399 mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
1400 /* MAC Forcing register: don't force link, speed,
1401 * duplex or flow control state to any particular
1402 * values on physical ports, but force the CPU port
1403 * and all DSA ports to their maximum bandwidth and
1404 * full duplex.
1405 */
1406 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
1407 if (dsa_is_cpu_port(ds, port) ||
1408 ds->dsa_port_mask & (1 << port)) {
1409 reg |= PORT_PCS_CTRL_FORCE_LINK |
1410 PORT_PCS_CTRL_LINK_UP |
1411 PORT_PCS_CTRL_DUPLEX_FULL |
1412 PORT_PCS_CTRL_FORCE_DUPLEX;
1413 if (mv88e6xxx_6065_family(ds))
1414 reg |= PORT_PCS_CTRL_100;
1415 else
1416 reg |= PORT_PCS_CTRL_1000;
1417 } else {
1418 reg |= PORT_PCS_CTRL_UNFORCED;
1421 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1422 PORT_PCS_CTRL, reg);
1423 if (ret)
1424 goto abort;
1427 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
1428 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
1429 * tunneling, determine priority by looking at 802.1p and IP
1430 * priority fields (IP prio has precedence), and set STP state
1431 * to Forwarding.
1433 * If this is the CPU link, use DSA or EDSA tagging depending
1434 * on which tagging mode was configured.
1436 * If this is a link to another switch, use DSA tagging mode.
1438 * If this is the upstream port for this switch, enable
1439 * forwarding of unknown unicasts and multicasts.
1440 */
1441 reg = 0;
1442 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1443 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1444 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
1445 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
1446 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
1447 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
1448 PORT_CONTROL_STATE_FORWARDING;
1449 if (dsa_is_cpu_port(ds, port)) {
1450 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
1451 reg |= PORT_CONTROL_DSA_TAG;
1452 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1453 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1454 mv88e6xxx_6320_family(ds)) {
1455 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
1456 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
1457 else
1458 reg |= PORT_CONTROL_FRAME_MODE_DSA;
1461 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1462 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1463 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
1464 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
1465 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
1466 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
1469 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1470 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1471 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
1472 mv88e6xxx_6320_family(ds)) {
1473 if (ds->dsa_port_mask & (1 << port))
1474 reg |= PORT_CONTROL_FRAME_MODE_DSA;
1475 if (port == dsa_upstream_port(ds))
1476 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
1477 PORT_CONTROL_FORWARD_UNKNOWN_MC;
1479 if (reg) {
1480 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1481 PORT_CONTROL, reg);
1482 if (ret)
1483 goto abort;
1486 /* Port Control 2: don't force a good FCS, set the maximum
1487 * frame size to 10240 bytes, don't let the switch add or
1488 * strip 802.1q tags, don't discard tagged or untagged frames
1489 * on this port, do a destination address lookup on all
1490 * received packets as usual, disable ARP mirroring and don't
1491 * send a copy of all transmitted/received frames on this port
1492 * to the CPU.
1493 */
1494 reg = 0;
1495 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1496 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1497 mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
1498 reg = PORT_CONTROL_2_MAP_DA;
1500 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1501 mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
1502 reg |= PORT_CONTROL_2_JUMBO_10240;
1504 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
1505 /* Set the upstream port this port should use */
1506 reg |= dsa_upstream_port(ds);
1507 /* enable forwarding of unknown multicast addresses to
1508 * the upstream port
1509 */
1510 if (port == dsa_upstream_port(ds))
1511 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
1514 if (reg) {
1515 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1516 PORT_CONTROL_2, reg);
1517 if (ret)
1518 goto abort;
1521 /* Port Association Vector: when learning source addresses
1522 * of packets, add the address to the address database using
1523 * a port bitmap that has only the bit for this port set and
1524 * the other bits clear.
1525 */
1526 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
1527 1 << port);
1528 if (ret)
1529 goto abort;
1531 /* Egress rate control 2: disable egress rate control. */
1532 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
1533 0x0000);
1534 if (ret)
1535 goto abort;
1537 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1538 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1539 mv88e6xxx_6320_family(ds)) {
1540 /* Do not limit the period of time that this port can
1541 * be paused for by the remote end or the period of
1542 * time that this port can pause the remote end.
1543 */
1544 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1545 PORT_PAUSE_CTRL, 0x0000);
1546 if (ret)
1547 goto abort;
1549 /* Port ATU control: disable limiting the number of
1550 * address database entries that this port is allowed
1551 * to use.
1552 */
1553 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1554 PORT_ATU_CONTROL, 0x0000);
1555 /* Priority Override: disable DA, SA and VTU priority
1556 * override.
1557 */
1558 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1559 PORT_PRI_OVERRIDE, 0x0000);
1560 if (ret)
1561 goto abort;
1563 /* Port Ethertype: use the Ethertype DSA Ethertype
1564 * value.
1565 */
1566 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1567 PORT_ETH_TYPE, ETH_P_EDSA);
1568 if (ret)
1569 goto abort;
1570 /* Tag Remap: use an identity 802.1p prio -> switch
1571 * prio mapping.
1572 */
1573 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1574 PORT_TAG_REGMAP_0123, 0x3210);
1575 if (ret)
1576 goto abort;
1578 /* Tag Remap 2: use an identity 802.1p prio -> switch
1579 * prio mapping.
1580 */
1581 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1582 PORT_TAG_REGMAP_4567, 0x7654);
1583 if (ret)
1584 goto abort;
1587 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1588 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1589 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
1590 mv88e6xxx_6320_family(ds)) {
1591 /* Rate Control: disable ingress rate limiting. */
1592 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1593 PORT_RATE_CONTROL, 0x0001);
1594 if (ret)
1595 goto abort;
1598 /* Port Control 1: disable trunking, disable sending
1599 * learning messages to this port.
1600 */
1601 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
1602 if (ret)
1603 goto abort;
1605 /* Port based VLAN map: give each port its own address
1606 * database, allow the CPU port to talk to each of the 'real'
1607 * ports, and allow each of the 'real' ports to only talk to
1608 * the upstream port.
1609 */
1610 fid = port + 1;
1611 ps->fid[port] = fid;
1612 set_bit(fid, ps->fid_bitmap);
1614 if (!dsa_is_cpu_port(ds, port))
1615 ps->bridge_mask[fid] = 1 << port;
1617 ret = _mv88e6xxx_update_port_config(ds, port);
1618 if (ret)
1619 goto abort;
1621 /* Default VLAN ID and priority: don't set a default VLAN
1622 * ID, and set the default packet priority to zero.
1623 */
1624 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1625 0x0000);
1626 abort:
1627 mutex_unlock(&ps->smi_mutex);
1628 return ret;
1631 int mv88e6xxx_setup_ports(struct dsa_switch *ds)
1633 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1634 int ret;
1635 int i;
1637 for (i = 0; i < ps->num_ports; i++) {
1638 ret = mv88e6xxx_setup_port(ds, i);
1639 if (ret < 0)
1640 return ret;
1642 return 0;
1645 static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
1647 struct dsa_switch *ds = s->private;
1649 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1650 int reg, port;
1652 seq_puts(s, " GLOBAL GLOBAL2 ");
1653 for (port = 0 ; port < ps->num_ports; port++)
1654 seq_printf(s, " %2d ", port);
1655 seq_puts(s, "\n");
1657 for (reg = 0; reg < 32; reg++) {
1658 seq_printf(s, "%2x: ", reg);
1659 seq_printf(s, " %4x %4x ",
1660 mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
1661 mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
1663 for (port = 0 ; port < ps->num_ports; port++)
1664 seq_printf(s, "%4x ",
1665 mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
1666 seq_puts(s, "\n");
1669 return 0;
1672 static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
1674 return single_open(file, mv88e6xxx_regs_show, inode->i_private);
1677 static const struct file_operations mv88e6xxx_regs_fops = {
1678 .open = mv88e6xxx_regs_open,
1679 .read = seq_read,
1680 .llseek = no_llseek,
1681 .release = single_release,
1682 .owner = THIS_MODULE,
1685 static void mv88e6xxx_atu_show_header(struct seq_file *s)
1687 seq_puts(s, "DB T/P Vec State Addr\n");
1690 static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
1691 unsigned char *addr, int data)
1693 bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
1694 int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
1695 GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
1696 int state = data & GLOBAL_ATU_DATA_STATE_MASK;
1698 seq_printf(s, "%03x %5s %10pb %x %pM\n",
1699 dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
1702 static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
1703 int dbnum)
1705 unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1706 unsigned char addr[6];
1707 int ret, data, state;
1709 ret = _mv88e6xxx_atu_mac_write(ds, bcast);
1710 if (ret < 0)
1711 return ret;
1713 do {
1714 ret = _mv88e6xxx_atu_cmd(ds, dbnum, GLOBAL_ATU_OP_GET_NEXT_DB);
1715 if (ret < 0)
1716 return ret;
1717 data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1718 if (data < 0)
1719 return data;
1721 state = data & GLOBAL_ATU_DATA_STATE_MASK;
1722 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
1723 break;
1724 ret = _mv88e6xxx_atu_mac_read(ds, addr);
1725 if (ret < 0)
1726 return ret;
1727 mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
1728 } while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
1730 return 0;
1733 static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
1735 struct dsa_switch *ds = s->private;
1736 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1737 int dbnum;
1739 mv88e6xxx_atu_show_header(s);
1741 for (dbnum = 0; dbnum < 255; dbnum++) {
1742 mutex_lock(&ps->smi_mutex);
1743 mv88e6xxx_atu_show_db(s, ds, dbnum);
1744 mutex_unlock(&ps->smi_mutex);
1747 return 0;
1750 static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
1752 return single_open(file, mv88e6xxx_atu_show, inode->i_private);
1755 static const struct file_operations mv88e6xxx_atu_fops = {
1756 .open = mv88e6xxx_atu_open,
1757 .read = seq_read,
1758 .llseek = no_llseek,
1759 .release = single_release,
1760 .owner = THIS_MODULE,
1763 static void mv88e6xxx_stats_show_header(struct seq_file *s,
1764 struct mv88e6xxx_priv_state *ps)
1766 int port;
1768 seq_puts(s, " Statistic ");
1769 for (port = 0 ; port < ps->num_ports; port++)
1770 seq_printf(s, "Port %2d ", port);
1771 seq_puts(s, "\n");
1774 static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
1776 struct dsa_switch *ds = s->private;
1777 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1778 struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
1779 int port, stat, max_stats;
1780 uint64_t value;
1782 if (have_sw_in_discards(ds))
1783 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
1784 else
1785 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
1787 mv88e6xxx_stats_show_header(s, ps);
1789 mutex_lock(&ps->smi_mutex);
1791 for (stat = 0; stat < max_stats; stat++) {
1792 seq_printf(s, "%19s: ", stats[stat].string);
1793 for (port = 0 ; port < ps->num_ports; port++) {
1794 _mv88e6xxx_stats_snapshot(ds, port);
1795 value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
1796 port);
1797 seq_printf(s, "%8llu ", value);
1799 seq_puts(s, "\n");
1801 mutex_unlock(&ps->smi_mutex);
1803 return 0;
1806 static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
1808 return single_open(file, mv88e6xxx_stats_show, inode->i_private);
1811 static const struct file_operations mv88e6xxx_stats_fops = {
1812 .open = mv88e6xxx_stats_open,
1813 .read = seq_read,
1814 .llseek = no_llseek,
1815 .release = single_release,
1816 .owner = THIS_MODULE,
1819 static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
1821 struct dsa_switch *ds = s->private;
1822 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1823 int target, ret;
1825 seq_puts(s, "Target Port\n");
1827 mutex_lock(&ps->smi_mutex);
1828 for (target = 0; target < 32; target++) {
1829 ret = _mv88e6xxx_reg_write(
1830 ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
1831 target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
1832 if (ret < 0)
1833 goto out;
1834 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
1835 GLOBAL2_DEVICE_MAPPING);
1836 seq_printf(s, " %2d %2d\n", target,
1837 ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
1839 out:
1840 mutex_unlock(&ps->smi_mutex);
1842 return 0;
1845 static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
1847 return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
1850 static const struct file_operations mv88e6xxx_device_map_fops = {
1851 .open = mv88e6xxx_device_map_open,
1852 .read = seq_read,
1853 .llseek = no_llseek,
1854 .release = single_release,
1855 .owner = THIS_MODULE,
1858 static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
1860 struct dsa_switch *ds = s->private;
1861 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1862 int reg, ret;
1864 seq_puts(s, "Register Value\n");
1866 mutex_lock(&ps->smi_mutex);
1867 for (reg = 0; reg < 0x80; reg++) {
1868 ret = _mv88e6xxx_reg_write(
1869 ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
1870 reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
1871 if (ret < 0)
1872 goto out;
1874 ret = _mv88e6xxx_scratch_wait(ds);
1875 if (ret < 0)
1876 goto out;
1878 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
1879 GLOBAL2_SCRATCH_MISC);
1880 seq_printf(s, " %2x %2x\n", reg,
1881 ret & GLOBAL2_SCRATCH_VALUE_MASK);
1883 out:
1884 mutex_unlock(&ps->smi_mutex);
1886 return 0;
1889 static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
1891 return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
1894 static const struct file_operations mv88e6xxx_scratch_fops = {
1895 .open = mv88e6xxx_scratch_open,
1896 .read = seq_read,
1897 .llseek = no_llseek,
1898 .release = single_release,
1899 .owner = THIS_MODULE,
1902 int mv88e6xxx_setup_common(struct dsa_switch *ds)
1904 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1905 char *name;
1907 mutex_init(&ps->smi_mutex);
1909 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
1911 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
1913 name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
1914 ps->dbgfs = debugfs_create_dir(name, NULL);
1915 kfree(name);
1917 debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
1918 &mv88e6xxx_regs_fops);
1920 debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
1921 &mv88e6xxx_atu_fops);
1923 debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
1924 &mv88e6xxx_stats_fops);
1926 debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
1927 &mv88e6xxx_device_map_fops);
1929 debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
1930 &mv88e6xxx_scratch_fops);
1931 return 0;
1934 int mv88e6xxx_setup_global(struct dsa_switch *ds)
1936 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1937 int ret;
1938 int i;
1940 /* Set the default address aging time to 5 minutes, and
1941 * enable address learn messages to be sent to all message
1942 * ports.
1943 */
1944 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
1945 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
1947 /* Configure the IP ToS mapping registers. */
1948 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
1949 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
1950 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
1951 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
1952 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
1953 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
1954 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
1955 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
1957 /* Configure the IEEE 802.1p priority mapping register. */
1958 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
1960 /* Send all frames with destination addresses matching
1961 * 01:80:c2:00:00:0x to the CPU port.
1962 */
1963 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
1965 /* Ignore removed tag data on doubly tagged packets, disable
1966 * flow control messages, force flow control priority to the
1967 * highest, and send all special multicast frames to the CPU
1968 * port at the highest priority.
1969 */
1970 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
1971 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
1972 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
1974 /* Program the DSA routing table. */
1975 for (i = 0; i < 32; i++) {
1976 int nexthop = 0x1f;
1978 if (ds->pd->rtable &&
1979 i != ds->index && i < ds->dst->pd->nr_chips)
1980 nexthop = ds->pd->rtable[i] & 0x1f;
1982 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
1983 GLOBAL2_DEVICE_MAPPING_UPDATE |
1984 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
1985 nexthop);
1988 /* Clear all trunk masks. */
1989 for (i = 0; i < 8; i++)
1990 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
1991 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
1992 ((1 << ps->num_ports) - 1));
1994 /* Clear all trunk mappings. */
1995 for (i = 0; i < 16; i++)
1996 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
1997 GLOBAL2_TRUNK_MAPPING_UPDATE |
1998 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2000 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2001 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2002 mv88e6xxx_6320_family(ds)) {
2003 /* Send all frames with destination addresses matching
2004 * 01:80:c2:00:00:2x to the CPU port.
2005 */
2006 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2008 /* Initialise cross-chip port VLAN table to reset
2009 * defaults.
2010 */
2011 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2013 /* Clear the priority override table. */
2014 for (i = 0; i < 16; i++)
2015 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2016 0x8000 | (i << 8));
2019 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2020 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2021 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2022 mv88e6xxx_6320_family(ds)) {
2023 /* Disable ingress rate limiting by resetting all
2024 * ingress rate limit registers to their initial
2025 * state.
2026 */
2027 for (i = 0; i < ps->num_ports; i++)
2028 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2029 0x9000 | (i << 8));
2032 /* Clear the statistics counters for all ports */
2033 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2035 /* Wait for the flush to complete. */
2036 mutex_lock(&ps->smi_mutex);
2037 ret = _mv88e6xxx_stats_wait(ds);
2038 mutex_unlock(&ps->smi_mutex);
2040 return ret;
2043 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2045 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2046 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2047 unsigned long timeout;
2048 int ret;
2049 int i;
2051 /* Set all ports to the disabled state. */
2052 for (i = 0; i < ps->num_ports; i++) {
2053 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
2054 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
2057 /* Wait for transmit queues to drain. */
2058 usleep_range(2000, 4000);
2060 /* Reset the switch. Keep the PPU active if requested. The PPU
2061 * needs to be active to support indirect phy register access
2062 * through global registers 0x18 and 0x19.
2063 */
2064 if (ppu_active)
2065 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2066 else
2067 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2069 /* Wait up to one second for reset to complete. */
2070 timeout = jiffies + 1 * HZ;
2071 while (time_before(jiffies, timeout)) {
2072 ret = REG_READ(REG_GLOBAL, 0x00);
2073 if ((ret & is_reset) == is_reset)
2074 break;
2075 usleep_range(1000, 2000);
2077 if (time_after(jiffies, timeout))
2078 return -ETIMEDOUT;
2080 return 0;
2083 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2085 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2086 int ret;
2088 mutex_lock(&ps->smi_mutex);
2089 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2090 if (ret < 0)
2091 goto error;
2092 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
2093 error:
2094 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2095 mutex_unlock(&ps->smi_mutex);
2096 return ret;
2099 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2100 int reg, int val)
2102 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2103 int ret;
2105 mutex_lock(&ps->smi_mutex);
2106 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2107 if (ret < 0)
2108 goto error;
2110 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
2111 error:
2112 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2113 mutex_unlock(&ps->smi_mutex);
2114 return ret;
2117 static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2119 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2121 if (port >= 0 && port < ps->num_ports)
2122 return port;
2123 return -EINVAL;
2127 mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2129 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2130 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2131 int ret;
2133 if (addr < 0)
2134 return addr;
2136 mutex_lock(&ps->smi_mutex);
2137 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
2138 mutex_unlock(&ps->smi_mutex);
2139 return ret;
2143 mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2145 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2146 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2147 int ret;
2149 if (addr < 0)
2150 return addr;
2152 mutex_lock(&ps->smi_mutex);
2153 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
2154 mutex_unlock(&ps->smi_mutex);
2155 return ret;
2159 mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2161 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2162 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2163 int ret;
2165 if (addr < 0)
2166 return addr;
2168 mutex_lock(&ps->smi_mutex);
2169 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
2170 mutex_unlock(&ps->smi_mutex);
2171 return ret;
2175 mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2176 u16 val)
2178 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2179 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2180 int ret;
2182 if (addr < 0)
2183 return addr;
2185 mutex_lock(&ps->smi_mutex);
2186 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
2187 mutex_unlock(&ps->smi_mutex);
2188 return ret;
2191 #ifdef CONFIG_NET_DSA_HWMON
2193 static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2195 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2196 int ret;
2197 int val;
2199 *temp = 0;
2201 mutex_lock(&ps->smi_mutex);
2203 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2204 if (ret < 0)
2205 goto error;
2207 /* Enable temperature sensor */
2208 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2209 if (ret < 0)
2210 goto error;
2212 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2213 if (ret < 0)
2214 goto error;
2216 /* Wait for temperature to stabilize */
2217 usleep_range(10000, 12000);
2219 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2220 if (val < 0) {
2221 ret = val;
2222 goto error;
2225 /* Disable temperature sensor */
2226 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
2227 if (ret < 0)
2228 goto error;
2230 *temp = ((val & 0x1f) - 5) * 5;
2232 error:
2233 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2234 mutex_unlock(&ps->smi_mutex);
2235 return ret;
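/* Editor's note (illustrative, not from the original source): the 5-bit
 * raw sensor value read above maps to degrees Celsius in 5-degree steps
 * with an offset of 5, e.g. a raw reading of 0x10 (16) yields
 * (16 - 5) * 5 = 55 degrees.
 */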
2238 static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2240 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2241 int ret;
2243 *temp = 0;
2245 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2246 if (ret < 0)
2247 return ret;
2249 *temp = (ret & 0xff) - 25;
2251 return 0;
2254 int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2256 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2257 return mv88e63xx_get_temp(ds, temp);
2259 return mv88e61xx_get_temp(ds, temp);
2262 int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2264 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2265 int ret;
2267 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2268 return -EOPNOTSUPP;
2270 *temp = 0;
2272 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2273 if (ret < 0)
2274 return ret;
2276 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
2278 return 0;
2281 int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2283 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2284 int ret;
2286 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2287 return -EOPNOTSUPP;
2289 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2290 if (ret < 0)
2291 return ret;
2292 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
2293 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2294 (ret & 0xe0ff) | (temp << 8));
2297 int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2299 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2300 int ret;
2302 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2303 return -EOPNOTSUPP;
2305 *alarm = false;
2307 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2308 if (ret < 0)
2309 return ret;
2311 *alarm = !!(ret & 0x40);
2313 return 0;
2315 #endif /* CONFIG_NET_DSA_HWMON */
2317 static int __init mv88e6xxx_init(void)
2319 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2320 register_switch_driver(&mv88e6131_switch_driver);
2321 #endif
2322 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2323 register_switch_driver(&mv88e6123_61_65_switch_driver);
2324 #endif
2325 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2326 register_switch_driver(&mv88e6352_switch_driver);
2327 #endif
2328 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2329 register_switch_driver(&mv88e6171_switch_driver);
2330 #endif
2331 return 0;
2333 module_init(mv88e6xxx_init);
2335 static void __exit mv88e6xxx_cleanup(void)
2337 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2338 unregister_switch_driver(&mv88e6171_switch_driver);
2339 #endif
2340 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2341 unregister_switch_driver(&mv88e6352_switch_driver);
2342 #endif
2343 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2344 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
2345 #endif
2346 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2347 unregister_switch_driver(&mv88e6131_switch_driver);
2348 #endif
2350 module_exit(mv88e6xxx_cleanup);
2352 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2353 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2354 MODULE_LICENSE("GPL");