/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
11 #include <linux/delay.h>
12 #include <linux/etherdevice.h>
13 #include <linux/if_bridge.h>
14 #include <linux/jiffies.h>
15 #include <linux/list.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/phy.h>
20 #include "mv88e6xxx.h"
/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair.  If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
30 static int mv88e6xxx_reg_wait_ready(struct mii_bus
*bus
, int sw_addr
)
35 for (i
= 0; i
< 16; i
++) {
36 ret
= mdiobus_read(bus
, sw_addr
, 0);
40 if ((ret
& 0x8000) == 0)
/* Read a 16-bit switch register over SMI.  With sw_addr == 0 the switch
 * registers are directly visible on the MDIO bus; otherwise use the
 * two-register indirect access protocol at device address sw_addr.
 * Returns the register value (0..0xffff) or a negative errno.
 */
int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
{
	int ret;

	if (sw_addr == 0)
		return mdiobus_read(bus, addr, reg);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the read command. */
	ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the read command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Read the data. */
	ret = mdiobus_read(bus, sw_addr, 1);
	if (ret < 0)
		return ret;

	return ret & 0xffff;
}
77 /* Must be called with SMI mutex held */
78 static int _mv88e6xxx_reg_read(struct dsa_switch
*ds
, int addr
, int reg
)
80 struct mii_bus
*bus
= dsa_host_dev_to_mii_bus(ds
->master_dev
);
86 ret
= __mv88e6xxx_reg_read(bus
, ds
->pd
->sw_addr
, addr
, reg
);
90 dev_dbg(ds
->master_dev
, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
96 int mv88e6xxx_reg_read(struct dsa_switch
*ds
, int addr
, int reg
)
98 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
101 mutex_lock(&ps
->smi_mutex
);
102 ret
= _mv88e6xxx_reg_read(ds
, addr
, reg
);
103 mutex_unlock(&ps
->smi_mutex
);
108 int __mv88e6xxx_reg_write(struct mii_bus
*bus
, int sw_addr
, int addr
,
114 return mdiobus_write(bus
, addr
, reg
, val
);
116 /* Wait for the bus to become free. */
117 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
121 /* Transmit the data to write. */
122 ret
= mdiobus_write(bus
, sw_addr
, 1, val
);
126 /* Transmit the write command. */
127 ret
= mdiobus_write(bus
, sw_addr
, 0, 0x9400 | (addr
<< 5) | reg
);
131 /* Wait for the write command to complete. */
132 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
139 /* Must be called with SMI mutex held */
140 static int _mv88e6xxx_reg_write(struct dsa_switch
*ds
, int addr
, int reg
,
143 struct mii_bus
*bus
= dsa_host_dev_to_mii_bus(ds
->master_dev
);
148 dev_dbg(ds
->master_dev
, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
151 return __mv88e6xxx_reg_write(bus
, ds
->pd
->sw_addr
, addr
, reg
, val
);
154 int mv88e6xxx_reg_write(struct dsa_switch
*ds
, int addr
, int reg
, u16 val
)
156 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
159 mutex_lock(&ps
->smi_mutex
);
160 ret
= _mv88e6xxx_reg_write(ds
, addr
, reg
, val
);
161 mutex_unlock(&ps
->smi_mutex
);
166 int mv88e6xxx_config_prio(struct dsa_switch
*ds
)
168 /* Configure the IP ToS mapping registers. */
169 REG_WRITE(REG_GLOBAL
, 0x10, 0x0000);
170 REG_WRITE(REG_GLOBAL
, 0x11, 0x0000);
171 REG_WRITE(REG_GLOBAL
, 0x12, 0x5555);
172 REG_WRITE(REG_GLOBAL
, 0x13, 0x5555);
173 REG_WRITE(REG_GLOBAL
, 0x14, 0xaaaa);
174 REG_WRITE(REG_GLOBAL
, 0x15, 0xaaaa);
175 REG_WRITE(REG_GLOBAL
, 0x16, 0xffff);
176 REG_WRITE(REG_GLOBAL
, 0x17, 0xffff);
178 /* Configure the IEEE 802.1p priority mapping register. */
179 REG_WRITE(REG_GLOBAL
, 0x18, 0xfa41);
184 int mv88e6xxx_set_addr_direct(struct dsa_switch
*ds
, u8
*addr
)
186 REG_WRITE(REG_GLOBAL
, 0x01, (addr
[0] << 8) | addr
[1]);
187 REG_WRITE(REG_GLOBAL
, 0x02, (addr
[2] << 8) | addr
[3]);
188 REG_WRITE(REG_GLOBAL
, 0x03, (addr
[4] << 8) | addr
[5]);
193 int mv88e6xxx_set_addr_indirect(struct dsa_switch
*ds
, u8
*addr
)
198 for (i
= 0; i
< 6; i
++) {
201 /* Write the MAC address byte. */
202 REG_WRITE(REG_GLOBAL2
, 0x0d, 0x8000 | (i
<< 8) | addr
[i
]);
204 /* Wait for the write to complete. */
205 for (j
= 0; j
< 16; j
++) {
206 ret
= REG_READ(REG_GLOBAL2
, 0x0d);
207 if ((ret
& 0x8000) == 0)
/* Must be called with phy mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	return mv88e6xxx_reg_read(ds, addr, regnum);
}
225 /* Must be called with phy mutex held */
226 static int _mv88e6xxx_phy_write(struct dsa_switch
*ds
, int addr
, int regnum
,
230 return mv88e6xxx_reg_write(ds
, addr
, regnum
, val
);
234 #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
235 static int mv88e6xxx_ppu_disable(struct dsa_switch
*ds
)
238 unsigned long timeout
;
240 ret
= REG_READ(REG_GLOBAL
, 0x04);
241 REG_WRITE(REG_GLOBAL
, 0x04, ret
& ~0x4000);
243 timeout
= jiffies
+ 1 * HZ
;
244 while (time_before(jiffies
, timeout
)) {
245 ret
= REG_READ(REG_GLOBAL
, 0x00);
246 usleep_range(1000, 2000);
247 if ((ret
& 0xc000) != 0xc000)
254 static int mv88e6xxx_ppu_enable(struct dsa_switch
*ds
)
257 unsigned long timeout
;
259 ret
= REG_READ(REG_GLOBAL
, 0x04);
260 REG_WRITE(REG_GLOBAL
, 0x04, ret
| 0x4000);
262 timeout
= jiffies
+ 1 * HZ
;
263 while (time_before(jiffies
, timeout
)) {
264 ret
= REG_READ(REG_GLOBAL
, 0x00);
265 usleep_range(1000, 2000);
266 if ((ret
& 0xc000) == 0xc000)
273 static void mv88e6xxx_ppu_reenable_work(struct work_struct
*ugly
)
275 struct mv88e6xxx_priv_state
*ps
;
277 ps
= container_of(ugly
, struct mv88e6xxx_priv_state
, ppu_work
);
278 if (mutex_trylock(&ps
->ppu_mutex
)) {
279 struct dsa_switch
*ds
= ((struct dsa_switch
*)ps
) - 1;
281 if (mv88e6xxx_ppu_enable(ds
) == 0)
282 ps
->ppu_disabled
= 0;
283 mutex_unlock(&ps
->ppu_mutex
);
287 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps
)
289 struct mv88e6xxx_priv_state
*ps
= (void *)_ps
;
291 schedule_work(&ps
->ppu_work
);
294 static int mv88e6xxx_ppu_access_get(struct dsa_switch
*ds
)
296 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
299 mutex_lock(&ps
->ppu_mutex
);
301 /* If the PHY polling unit is enabled, disable it so that
302 * we can access the PHY registers. If it was already
303 * disabled, cancel the timer that is going to re-enable
306 if (!ps
->ppu_disabled
) {
307 ret
= mv88e6xxx_ppu_disable(ds
);
309 mutex_unlock(&ps
->ppu_mutex
);
312 ps
->ppu_disabled
= 1;
314 del_timer(&ps
->ppu_timer
);
321 static void mv88e6xxx_ppu_access_put(struct dsa_switch
*ds
)
323 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
325 /* Schedule a timer to re-enable the PHY polling unit. */
326 mod_timer(&ps
->ppu_timer
, jiffies
+ msecs_to_jiffies(10));
327 mutex_unlock(&ps
->ppu_mutex
);
330 void mv88e6xxx_ppu_state_init(struct dsa_switch
*ds
)
332 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
334 mutex_init(&ps
->ppu_mutex
);
335 INIT_WORK(&ps
->ppu_work
, mv88e6xxx_ppu_reenable_work
);
336 init_timer(&ps
->ppu_timer
);
337 ps
->ppu_timer
.data
= (unsigned long)ps
;
338 ps
->ppu_timer
.function
= mv88e6xxx_ppu_reenable_timer
;
/* Read a PHY register with the PPU temporarily disabled. */
int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
354 int mv88e6xxx_phy_write_ppu(struct dsa_switch
*ds
, int addr
,
359 ret
= mv88e6xxx_ppu_access_get(ds
);
361 ret
= mv88e6xxx_reg_write(ds
, addr
, regnum
, val
);
362 mv88e6xxx_ppu_access_put(ds
);
369 void mv88e6xxx_poll_link(struct dsa_switch
*ds
)
373 for (i
= 0; i
< DSA_MAX_PORTS
; i
++) {
374 struct net_device
*dev
;
375 int uninitialized_var(port_status
);
386 if (dev
->flags
& IFF_UP
) {
387 port_status
= mv88e6xxx_reg_read(ds
, REG_PORT(i
), 0x00);
391 link
= !!(port_status
& 0x0800);
395 if (netif_carrier_ok(dev
)) {
396 netdev_info(dev
, "link down\n");
397 netif_carrier_off(dev
);
402 switch (port_status
& 0x0300) {
416 duplex
= (port_status
& 0x0400) ? 1 : 0;
417 fc
= (port_status
& 0x8000) ? 1 : 0;
419 if (!netif_carrier_ok(dev
)) {
421 "link up, %d Mb/s, %s duplex, flow control %sabled\n",
423 duplex
? "full" : "half",
425 netif_carrier_on(dev
);
430 static int mv88e6xxx_stats_wait(struct dsa_switch
*ds
)
435 for (i
= 0; i
< 10; i
++) {
436 ret
= REG_READ(REG_GLOBAL
, 0x1d);
437 if ((ret
& 0x8000) == 0)
444 static int mv88e6xxx_stats_snapshot(struct dsa_switch
*ds
, int port
)
448 /* Snapshot the hardware statistics counters for this port. */
449 REG_WRITE(REG_GLOBAL
, 0x1d, 0xdc00 | port
);
451 /* Wait for the snapshotting to complete. */
452 ret
= mv88e6xxx_stats_wait(ds
);
459 static void mv88e6xxx_stats_read(struct dsa_switch
*ds
, int stat
, u32
*val
)
466 ret
= mv88e6xxx_reg_write(ds
, REG_GLOBAL
, 0x1d, 0xcc00 | stat
);
470 ret
= mv88e6xxx_stats_wait(ds
);
474 ret
= mv88e6xxx_reg_read(ds
, REG_GLOBAL
, 0x1e);
480 ret
= mv88e6xxx_reg_read(ds
, REG_GLOBAL
, 0x1f);
487 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats
[] = {
488 { "in_good_octets", 8, 0x00, },
489 { "in_bad_octets", 4, 0x02, },
490 { "in_unicast", 4, 0x04, },
491 { "in_broadcasts", 4, 0x06, },
492 { "in_multicasts", 4, 0x07, },
493 { "in_pause", 4, 0x16, },
494 { "in_undersize", 4, 0x18, },
495 { "in_fragments", 4, 0x19, },
496 { "in_oversize", 4, 0x1a, },
497 { "in_jabber", 4, 0x1b, },
498 { "in_rx_error", 4, 0x1c, },
499 { "in_fcs_error", 4, 0x1d, },
500 { "out_octets", 8, 0x0e, },
501 { "out_unicast", 4, 0x10, },
502 { "out_broadcasts", 4, 0x13, },
503 { "out_multicasts", 4, 0x12, },
504 { "out_pause", 4, 0x15, },
505 { "excessive", 4, 0x11, },
506 { "collisions", 4, 0x1e, },
507 { "deferred", 4, 0x05, },
508 { "single", 4, 0x14, },
509 { "multiple", 4, 0x17, },
510 { "out_fcs_error", 4, 0x03, },
511 { "late", 4, 0x1f, },
512 { "hist_64bytes", 4, 0x08, },
513 { "hist_65_127bytes", 4, 0x09, },
514 { "hist_128_255bytes", 4, 0x0a, },
515 { "hist_256_511bytes", 4, 0x0b, },
516 { "hist_512_1023bytes", 4, 0x0c, },
517 { "hist_1024_max_bytes", 4, 0x0d, },
518 /* Not all devices have the following counters */
519 { "sw_in_discards", 4, 0x110, },
520 { "sw_in_filtered", 2, 0x112, },
521 { "sw_out_filtered", 2, 0x113, },
525 static bool have_sw_in_discards(struct dsa_switch
*ds
)
527 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
530 case ID_6095
: case ID_6161
: case ID_6165
:
531 case ID_6171
: case ID_6172
: case ID_6176
:
532 case ID_6182
: case ID_6185
: case ID_6352
:
539 static void _mv88e6xxx_get_strings(struct dsa_switch
*ds
,
541 struct mv88e6xxx_hw_stat
*stats
,
542 int port
, uint8_t *data
)
546 for (i
= 0; i
< nr_stats
; i
++) {
547 memcpy(data
+ i
* ETH_GSTRING_LEN
,
548 stats
[i
].string
, ETH_GSTRING_LEN
);
552 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch
*ds
,
554 struct mv88e6xxx_hw_stat
*stats
,
555 int port
, uint64_t *data
)
557 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
561 mutex_lock(&ps
->stats_mutex
);
563 ret
= mv88e6xxx_stats_snapshot(ds
, port
);
565 mutex_unlock(&ps
->stats_mutex
);
569 /* Read each of the counters. */
570 for (i
= 0; i
< nr_stats
; i
++) {
571 struct mv88e6xxx_hw_stat
*s
= stats
+ i
;
575 if (s
->reg
>= 0x100) {
578 ret
= mv88e6xxx_reg_read(ds
, REG_PORT(port
),
583 if (s
->sizeof_stat
== 4) {
584 ret
= mv88e6xxx_reg_read(ds
, REG_PORT(port
),
590 data
[i
] = (((u64
)high
) << 16) | low
;
593 mv88e6xxx_stats_read(ds
, s
->reg
, &low
);
594 if (s
->sizeof_stat
== 8)
595 mv88e6xxx_stats_read(ds
, s
->reg
+ 1, &high
);
597 data
[i
] = (((u64
)high
) << 32) | low
;
600 mutex_unlock(&ps
->stats_mutex
);
603 /* All the statistics in the table */
605 mv88e6xxx_get_strings(struct dsa_switch
*ds
, int port
, uint8_t *data
)
607 if (have_sw_in_discards(ds
))
608 _mv88e6xxx_get_strings(ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
),
609 mv88e6xxx_hw_stats
, port
, data
);
611 _mv88e6xxx_get_strings(ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3,
612 mv88e6xxx_hw_stats
, port
, data
);
615 int mv88e6xxx_get_sset_count(struct dsa_switch
*ds
)
617 if (have_sw_in_discards(ds
))
618 return ARRAY_SIZE(mv88e6xxx_hw_stats
);
619 return ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3;
623 mv88e6xxx_get_ethtool_stats(struct dsa_switch
*ds
,
624 int port
, uint64_t *data
)
626 if (have_sw_in_discards(ds
))
627 _mv88e6xxx_get_ethtool_stats(
628 ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
),
629 mv88e6xxx_hw_stats
, port
, data
);
631 _mv88e6xxx_get_ethtool_stats(
632 ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3,
633 mv88e6xxx_hw_stats
, port
, data
);
636 int mv88e6xxx_get_regs_len(struct dsa_switch
*ds
, int port
)
638 return 32 * sizeof(u16
);
641 void mv88e6xxx_get_regs(struct dsa_switch
*ds
, int port
,
642 struct ethtool_regs
*regs
, void *_p
)
649 memset(p
, 0xff, 32 * sizeof(u16
));
651 for (i
= 0; i
< 32; i
++) {
654 ret
= mv88e6xxx_reg_read(ds
, REG_PORT(port
), i
);
660 #ifdef CONFIG_NET_DSA_HWMON
662 int mv88e6xxx_get_temp(struct dsa_switch
*ds
, int *temp
)
664 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
670 mutex_lock(&ps
->phy_mutex
);
672 ret
= _mv88e6xxx_phy_write(ds
, 0x0, 0x16, 0x6);
676 /* Enable temperature sensor */
677 ret
= _mv88e6xxx_phy_read(ds
, 0x0, 0x1a);
681 ret
= _mv88e6xxx_phy_write(ds
, 0x0, 0x1a, ret
| (1 << 5));
685 /* Wait for temperature to stabilize */
686 usleep_range(10000, 12000);
688 val
= _mv88e6xxx_phy_read(ds
, 0x0, 0x1a);
694 /* Disable temperature sensor */
695 ret
= _mv88e6xxx_phy_write(ds
, 0x0, 0x1a, ret
& ~(1 << 5));
699 *temp
= ((val
& 0x1f) - 5) * 5;
702 _mv88e6xxx_phy_write(ds
, 0x0, 0x16, 0x0);
703 mutex_unlock(&ps
->phy_mutex
);
706 #endif /* CONFIG_NET_DSA_HWMON */
708 static int mv88e6xxx_wait(struct dsa_switch
*ds
, int reg
, int offset
, u16 mask
)
710 unsigned long timeout
= jiffies
+ HZ
/ 10;
712 while (time_before(jiffies
, timeout
)) {
715 ret
= REG_READ(reg
, offset
);
719 usleep_range(1000, 2000);
724 int mv88e6xxx_phy_wait(struct dsa_switch
*ds
)
726 return mv88e6xxx_wait(ds
, REG_GLOBAL2
, 0x18, 0x8000);
729 int mv88e6xxx_eeprom_load_wait(struct dsa_switch
*ds
)
731 return mv88e6xxx_wait(ds
, REG_GLOBAL2
, 0x14, 0x0800);
734 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch
*ds
)
736 return mv88e6xxx_wait(ds
, REG_GLOBAL2
, 0x14, 0x8000);
739 /* Must be called with SMI lock held */
740 static int _mv88e6xxx_wait(struct dsa_switch
*ds
, int reg
, int offset
, u16 mask
)
742 unsigned long timeout
= jiffies
+ HZ
/ 10;
744 while (time_before(jiffies
, timeout
)) {
747 ret
= _mv88e6xxx_reg_read(ds
, reg
, offset
);
753 usleep_range(1000, 2000);
758 /* Must be called with SMI lock held */
759 static int _mv88e6xxx_atu_wait(struct dsa_switch
*ds
)
761 return _mv88e6xxx_wait(ds
, REG_GLOBAL
, 0x0b, ATU_BUSY
);
764 /* Must be called with phy mutex held */
765 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch
*ds
, int addr
,
770 REG_WRITE(REG_GLOBAL2
, 0x18, 0x9800 | (addr
<< 5) | regnum
);
772 ret
= mv88e6xxx_phy_wait(ds
);
776 return REG_READ(REG_GLOBAL2
, 0x19);
779 /* Must be called with phy mutex held */
780 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch
*ds
, int addr
,
783 REG_WRITE(REG_GLOBAL2
, 0x19, val
);
784 REG_WRITE(REG_GLOBAL2
, 0x18, 0x9400 | (addr
<< 5) | regnum
);
786 return mv88e6xxx_phy_wait(ds
);
789 int mv88e6xxx_get_eee(struct dsa_switch
*ds
, int port
, struct ethtool_eee
*e
)
791 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
794 mutex_lock(&ps
->phy_mutex
);
796 reg
= _mv88e6xxx_phy_read_indirect(ds
, port
, 16);
800 e
->eee_enabled
= !!(reg
& 0x0200);
801 e
->tx_lpi_enabled
= !!(reg
& 0x0100);
803 reg
= mv88e6xxx_reg_read(ds
, REG_PORT(port
), 0);
807 e
->eee_active
= !!(reg
& 0x0040);
811 mutex_unlock(&ps
->phy_mutex
);
815 int mv88e6xxx_set_eee(struct dsa_switch
*ds
, int port
,
816 struct phy_device
*phydev
, struct ethtool_eee
*e
)
818 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
822 mutex_lock(&ps
->phy_mutex
);
824 ret
= _mv88e6xxx_phy_read_indirect(ds
, port
, 16);
831 if (e
->tx_lpi_enabled
)
834 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, 16, reg
);
836 mutex_unlock(&ps
->phy_mutex
);
841 static int _mv88e6xxx_atu_cmd(struct dsa_switch
*ds
, int fid
, u16 cmd
)
845 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, 0x01, fid
);
849 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, 0x0b, cmd
);
853 return _mv88e6xxx_atu_wait(ds
);
856 static int _mv88e6xxx_flush_fid(struct dsa_switch
*ds
, int fid
)
860 ret
= _mv88e6xxx_atu_wait(ds
);
864 return _mv88e6xxx_atu_cmd(ds
, fid
, ATU_CMD_FLUSH_NONSTATIC_FID
);
867 static int mv88e6xxx_set_port_state(struct dsa_switch
*ds
, int port
, u8 state
)
869 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
873 mutex_lock(&ps
->smi_mutex
);
875 reg
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), 0x04);
879 oldstate
= reg
& PSTATE_MASK
;
880 if (oldstate
!= state
) {
881 /* Flush forwarding database if we're moving a port
882 * from Learning or Forwarding state to Disabled or
883 * Blocking or Listening state.
885 if (oldstate
>= PSTATE_LEARNING
&& state
<= PSTATE_BLOCKING
) {
886 ret
= _mv88e6xxx_flush_fid(ds
, ps
->fid
[port
]);
890 reg
= (reg
& ~PSTATE_MASK
) | state
;
891 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), 0x04, reg
);
895 mutex_unlock(&ps
->smi_mutex
);
899 /* Must be called with smi lock held */
900 static int _mv88e6xxx_update_port_config(struct dsa_switch
*ds
, int port
)
902 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
903 u8 fid
= ps
->fid
[port
];
906 if (dsa_is_cpu_port(ds
, port
))
907 reg
|= ds
->phys_port_mask
;
909 reg
|= (ps
->bridge_mask
[fid
] |
910 (1 << dsa_upstream_port(ds
))) & ~(1 << port
);
912 return _mv88e6xxx_reg_write(ds
, REG_PORT(port
), 0x06, reg
);
915 /* Must be called with smi lock held */
916 static int _mv88e6xxx_update_bridge_config(struct dsa_switch
*ds
, int fid
)
918 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
923 mask
= ds
->phys_port_mask
;
926 mask
&= ~(1 << port
);
927 if (ps
->fid
[port
] != fid
)
930 ret
= _mv88e6xxx_update_port_config(ds
, port
);
935 return _mv88e6xxx_flush_fid(ds
, fid
);
938 /* Bridge handling functions */
940 int mv88e6xxx_join_bridge(struct dsa_switch
*ds
, int port
, u32 br_port_mask
)
942 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
947 /* If the bridge group is not empty, join that group.
948 * Otherwise create a new group.
951 nmask
= br_port_mask
& ~(1 << port
);
953 fid
= ps
->fid
[__ffs(nmask
)];
955 nmask
= ps
->bridge_mask
[fid
] | (1 << port
);
956 if (nmask
!= br_port_mask
) {
957 netdev_err(ds
->ports
[port
],
958 "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
959 fid
, br_port_mask
, nmask
);
963 mutex_lock(&ps
->smi_mutex
);
965 ps
->bridge_mask
[fid
] = br_port_mask
;
967 if (fid
!= ps
->fid
[port
]) {
968 ps
->fid_mask
|= 1 << ps
->fid
[port
];
970 ret
= _mv88e6xxx_update_bridge_config(ds
, fid
);
973 mutex_unlock(&ps
->smi_mutex
);
978 int mv88e6xxx_leave_bridge(struct dsa_switch
*ds
, int port
, u32 br_port_mask
)
980 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
986 if (ps
->bridge_mask
[fid
] != br_port_mask
) {
987 netdev_err(ds
->ports
[port
],
988 "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
989 fid
, br_port_mask
, ps
->bridge_mask
[fid
]);
993 /* If the port was the last port of a bridge, we are done.
994 * Otherwise assign a new fid to the port, and fix up
995 * the bridge configuration.
997 if (br_port_mask
== (1 << port
))
1000 mutex_lock(&ps
->smi_mutex
);
1002 newfid
= __ffs(ps
->fid_mask
);
1003 ps
->fid
[port
] = newfid
;
1004 ps
->fid_mask
&= (1 << newfid
);
1005 ps
->bridge_mask
[fid
] &= ~(1 << port
);
1006 ps
->bridge_mask
[newfid
] = 1 << port
;
1008 ret
= _mv88e6xxx_update_bridge_config(ds
, fid
);
1010 ret
= _mv88e6xxx_update_bridge_config(ds
, newfid
);
1012 mutex_unlock(&ps
->smi_mutex
);
1017 int mv88e6xxx_port_stp_update(struct dsa_switch
*ds
, int port
, u8 state
)
1019 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1023 case BR_STATE_DISABLED
:
1024 stp_state
= PSTATE_DISABLED
;
1026 case BR_STATE_BLOCKING
:
1027 case BR_STATE_LISTENING
:
1028 stp_state
= PSTATE_BLOCKING
;
1030 case BR_STATE_LEARNING
:
1031 stp_state
= PSTATE_LEARNING
;
1033 case BR_STATE_FORWARDING
:
1035 stp_state
= PSTATE_FORWARDING
;
1039 netdev_dbg(ds
->ports
[port
], "port state %d [%d]\n", state
, stp_state
);
1041 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1042 * so we can not update the port state directly but need to schedule it.
1044 ps
->port_state
[port
] = stp_state
;
1045 set_bit(port
, &ps
->port_state_update_mask
);
1046 schedule_work(&ps
->bridge_work
);
1051 static int __mv88e6xxx_write_addr(struct dsa_switch
*ds
,
1052 const unsigned char *addr
)
1056 for (i
= 0; i
< 3; i
++) {
1057 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, 0x0d + i
,
1058 (addr
[i
* 2] << 8) | addr
[i
* 2 + 1]);
1066 static int __mv88e6xxx_read_addr(struct dsa_switch
*ds
, unsigned char *addr
)
1070 for (i
= 0; i
< 3; i
++) {
1071 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, 0x0d + i
);
1074 addr
[i
* 2] = ret
>> 8;
1075 addr
[i
* 2 + 1] = ret
& 0xff;
1081 static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch
*ds
, int port
,
1082 const unsigned char *addr
, int state
)
1084 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1085 u8 fid
= ps
->fid
[port
];
1088 ret
= _mv88e6xxx_atu_wait(ds
);
1092 ret
= __mv88e6xxx_write_addr(ds
, addr
);
1096 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, 0x0c,
1097 (0x10 << port
) | state
);
1101 ret
= _mv88e6xxx_atu_cmd(ds
, fid
, ATU_CMD_LOAD_FID
);
1106 int mv88e6xxx_port_fdb_add(struct dsa_switch
*ds
, int port
,
1107 const unsigned char *addr
, u16 vid
)
1109 int state
= is_multicast_ether_addr(addr
) ?
1110 FDB_STATE_MC_STATIC
: FDB_STATE_STATIC
;
1111 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1114 mutex_lock(&ps
->smi_mutex
);
1115 ret
= __mv88e6xxx_port_fdb_cmd(ds
, port
, addr
, state
);
1116 mutex_unlock(&ps
->smi_mutex
);
1121 int mv88e6xxx_port_fdb_del(struct dsa_switch
*ds
, int port
,
1122 const unsigned char *addr
, u16 vid
)
1124 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1127 mutex_lock(&ps
->smi_mutex
);
1128 ret
= __mv88e6xxx_port_fdb_cmd(ds
, port
, addr
, FDB_STATE_UNUSED
);
1129 mutex_unlock(&ps
->smi_mutex
);
1134 static int __mv88e6xxx_port_getnext(struct dsa_switch
*ds
, int port
,
1135 unsigned char *addr
, bool *is_static
)
1137 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1138 u8 fid
= ps
->fid
[port
];
1141 ret
= _mv88e6xxx_atu_wait(ds
);
1145 ret
= __mv88e6xxx_write_addr(ds
, addr
);
1150 ret
= _mv88e6xxx_atu_cmd(ds
, fid
, ATU_CMD_GETNEXT_FID
);
1154 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, 0x0c);
1157 state
= ret
& FDB_STATE_MASK
;
1158 if (state
== FDB_STATE_UNUSED
)
1160 } while (!(((ret
>> 4) & 0xff) & (1 << port
)));
1162 ret
= __mv88e6xxx_read_addr(ds
, addr
);
1166 *is_static
= state
== (is_multicast_ether_addr(addr
) ?
1167 FDB_STATE_MC_STATIC
: FDB_STATE_STATIC
);
1172 /* get next entry for port */
1173 int mv88e6xxx_port_fdb_getnext(struct dsa_switch
*ds
, int port
,
1174 unsigned char *addr
, bool *is_static
)
1176 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1179 mutex_lock(&ps
->smi_mutex
);
1180 ret
= __mv88e6xxx_port_getnext(ds
, port
, addr
, is_static
);
1181 mutex_unlock(&ps
->smi_mutex
);
1186 static void mv88e6xxx_bridge_work(struct work_struct
*work
)
1188 struct mv88e6xxx_priv_state
*ps
;
1189 struct dsa_switch
*ds
;
1192 ps
= container_of(work
, struct mv88e6xxx_priv_state
, bridge_work
);
1193 ds
= ((struct dsa_switch
*)ps
) - 1;
1195 while (ps
->port_state_update_mask
) {
1196 port
= __ffs(ps
->port_state_update_mask
);
1197 clear_bit(port
, &ps
->port_state_update_mask
);
1198 mv88e6xxx_set_port_state(ds
, port
, ps
->port_state
[port
]);
1202 int mv88e6xxx_setup_port_common(struct dsa_switch
*ds
, int port
)
1204 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1207 mutex_lock(&ps
->smi_mutex
);
1209 /* Port Control 1: disable trunking, disable sending
1210 * learning messages to this port.
1212 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), 0x05, 0x0000);
1216 /* Port based VLAN map: give each port its own address
1217 * database, allow the CPU port to talk to each of the 'real'
1218 * ports, and allow each of the 'real' ports to only talk to
1219 * the upstream port.
1221 fid
= __ffs(ps
->fid_mask
);
1222 ps
->fid
[port
] = fid
;
1223 ps
->fid_mask
&= ~(1 << fid
);
1225 if (!dsa_is_cpu_port(ds
, port
))
1226 ps
->bridge_mask
[fid
] = 1 << port
;
1228 ret
= _mv88e6xxx_update_port_config(ds
, port
);
1232 /* Default VLAN ID and priority: don't set a default VLAN
1233 * ID, and set the default packet priority to zero.
1235 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), 0x07, 0x0000);
1237 mutex_unlock(&ps
->smi_mutex
);
1241 int mv88e6xxx_setup_common(struct dsa_switch
*ds
)
1243 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1245 mutex_init(&ps
->smi_mutex
);
1246 mutex_init(&ps
->stats_mutex
);
1247 mutex_init(&ps
->phy_mutex
);
1249 ps
->id
= REG_READ(REG_PORT(0), 0x03) & 0xfff0;
1251 ps
->fid_mask
= (1 << DSA_MAX_PORTS
) - 1;
1253 INIT_WORK(&ps
->bridge_work
, mv88e6xxx_bridge_work
);
1258 int mv88e6xxx_switch_reset(struct dsa_switch
*ds
, bool ppu_active
)
1260 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1261 u16 is_reset
= (ppu_active
? 0x8800 : 0xc800);
1262 unsigned long timeout
;
1266 /* Set all ports to the disabled state. */
1267 for (i
= 0; i
< ps
->num_ports
; i
++) {
1268 ret
= REG_READ(REG_PORT(i
), 0x04);
1269 REG_WRITE(REG_PORT(i
), 0x04, ret
& 0xfffc);
1272 /* Wait for transmit queues to drain. */
1273 usleep_range(2000, 4000);
1275 /* Reset the switch. Keep the PPU active if requested. The PPU
1276 * needs to be active to support indirect phy register access
1277 * through global registers 0x18 and 0x19.
1280 REG_WRITE(REG_GLOBAL
, 0x04, 0xc000);
1282 REG_WRITE(REG_GLOBAL
, 0x04, 0xc400);
1284 /* Wait up to one second for reset to complete. */
1285 timeout
= jiffies
+ 1 * HZ
;
1286 while (time_before(jiffies
, timeout
)) {
1287 ret
= REG_READ(REG_GLOBAL
, 0x00);
1288 if ((ret
& is_reset
) == is_reset
)
1290 usleep_range(1000, 2000);
1292 if (time_after(jiffies
, timeout
))
1298 int mv88e6xxx_phy_page_read(struct dsa_switch
*ds
, int port
, int page
, int reg
)
1300 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1303 mutex_lock(&ps
->phy_mutex
);
1304 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, page
);
1307 ret
= _mv88e6xxx_phy_read_indirect(ds
, port
, reg
);
1309 _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, 0x0);
1310 mutex_unlock(&ps
->phy_mutex
);
1314 int mv88e6xxx_phy_page_write(struct dsa_switch
*ds
, int port
, int page
,
1317 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1320 mutex_lock(&ps
->phy_mutex
);
1321 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, page
);
1325 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, reg
, val
);
1327 _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, 0x0);
1328 mutex_unlock(&ps
->phy_mutex
);
1332 static int mv88e6xxx_port_to_phy_addr(struct dsa_switch
*ds
, int port
)
1334 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1336 if (port
>= 0 && port
< ps
->num_ports
)
1342 mv88e6xxx_phy_read(struct dsa_switch
*ds
, int port
, int regnum
)
1344 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1345 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
1351 mutex_lock(&ps
->phy_mutex
);
1352 ret
= _mv88e6xxx_phy_read(ds
, addr
, regnum
);
1353 mutex_unlock(&ps
->phy_mutex
);
1358 mv88e6xxx_phy_write(struct dsa_switch
*ds
, int port
, int regnum
, u16 val
)
1360 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1361 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
1367 mutex_lock(&ps
->phy_mutex
);
1368 ret
= _mv88e6xxx_phy_write(ds
, addr
, regnum
, val
);
1369 mutex_unlock(&ps
->phy_mutex
);
1374 mv88e6xxx_phy_read_indirect(struct dsa_switch
*ds
, int port
, int regnum
)
1376 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1377 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
1383 mutex_lock(&ps
->phy_mutex
);
1384 ret
= _mv88e6xxx_phy_read_indirect(ds
, addr
, regnum
);
1385 mutex_unlock(&ps
->phy_mutex
);
1390 mv88e6xxx_phy_write_indirect(struct dsa_switch
*ds
, int port
, int regnum
,
1393 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1394 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
1400 mutex_lock(&ps
->phy_mutex
);
1401 ret
= _mv88e6xxx_phy_write_indirect(ds
, addr
, regnum
, val
);
1402 mutex_unlock(&ps
->phy_mutex
);
1406 static int __init
mv88e6xxx_init(void)
1408 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
1409 register_switch_driver(&mv88e6131_switch_driver
);
1411 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
1412 register_switch_driver(&mv88e6123_61_65_switch_driver
);
1414 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
1415 register_switch_driver(&mv88e6352_switch_driver
);
1417 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
1418 register_switch_driver(&mv88e6171_switch_driver
);
1422 module_init(mv88e6xxx_init
);
1424 static void __exit
mv88e6xxx_cleanup(void)
1426 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
1427 unregister_switch_driver(&mv88e6171_switch_driver
);
1429 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
1430 unregister_switch_driver(&mv88e6123_61_65_switch_driver
);
1432 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
1433 unregister_switch_driver(&mv88e6131_switch_driver
);
1436 module_exit(mv88e6xxx_cleanup
);
1438 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
1439 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
1440 MODULE_LICENSE("GPL");