/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>
#include "mv88e6xxx.h"

/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
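
/* When the indirect addressing mechanism is in use, the helpers below poll
 * the command register (offset 0 at the switch's SMI address) until its
 * busy bit (0x8000) clears, issue a read (0x9800) or write (0x9400) command
 * that encodes the target device and register, and transfer the 16-bit data
 * through the data register at offset 1.
 */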

static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
{
	int ret;
	int i;

	for (i = 0; i < 16; i++) {
		ret = mdiobus_read(bus, sw_addr, 0);
		if (ret < 0)
			return ret;

		if ((ret & 0x8000) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
{
	int ret;

	if (sw_addr == 0)
		return mdiobus_read(bus, addr, reg);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the read command. */
	ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the read command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Read the data. */
	ret = mdiobus_read(bus, sw_addr, 1);
	if (ret < 0)
		return ret;

	return ret & 0xffff;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
	int ret;

	if (bus == NULL)
		return -EINVAL;

	ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
	if (ret < 0)
		return ret;

	dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, ret);

	return ret;
}

int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_read(ds, addr, reg);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
			  int reg, u16 val)
{
	int ret;

	if (sw_addr == 0)
		return mdiobus_write(bus, addr, reg, val);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the data to write. */
	ret = mdiobus_write(bus, sw_addr, 1, val);
	if (ret < 0)
		return ret;

	/* Transmit the write command. */
	ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the write command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	return 0;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
				u16 val)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);

	if (bus == NULL)
		return -EINVAL;

	dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, val);

	return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
}

int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_config_prio(struct dsa_switch *ds)
{
	/* Configure the IP ToS mapping registers. */
	REG_WRITE(REG_GLOBAL, 0x10, 0x0000);
	REG_WRITE(REG_GLOBAL, 0x11, 0x0000);
	REG_WRITE(REG_GLOBAL, 0x12, 0x5555);
	REG_WRITE(REG_GLOBAL, 0x13, 0x5555);
	REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa);
	REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa);
	REG_WRITE(REG_GLOBAL, 0x16, 0xffff);
	REG_WRITE(REG_GLOBAL, 0x17, 0xffff);

	/* Configure the IEEE 802.1p priority mapping register. */
	REG_WRITE(REG_GLOBAL, 0x18, 0xfa41);

	return 0;
}

int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
	REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
	REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
	REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);

	return 0;
}

int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
	int i;
	int ret;

	for (i = 0; i < 6; i++) {
		int j;

		/* Write the MAC address byte. */
		REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]);

		/* Wait for the write to complete. */
		for (j = 0; j < 16; j++) {
			ret = REG_READ(REG_GLOBAL2, 0x0d);
			if ((ret & 0x8000) == 0)
				break;
		}
		if (j == 16)
			return -ETIMEDOUT;
	}

	return 0;
}

/* Must be called with phy mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	if (addr >= 0)
		return mv88e6xxx_reg_read(ds, addr, regnum);
	return 0xffff;
}

/* Must be called with phy mutex held */
static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
				u16 val)
{
	if (addr >= 0)
		return mv88e6xxx_reg_write(ds, addr, regnum, val);
	return 0;
}
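
/* On switches that need it, the PHY Polling Unit (PPU) continuously polls
 * the internal PHYs, so it has to be disabled (global register 0x04, bit
 * 0x4000) before the CPU can access PHY registers directly.  The accessors
 * below disable it on first use and arm a short (10 ms) timer that
 * re-enables it once PHY accesses stop.
 */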

#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, 0x04);
	REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, 0x00);
		usleep_range(1000, 2000);
		if ((ret & 0xc000) != 0xc000)
			return 0;
	}

	return -ETIMEDOUT;
}

static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, 0x04);
	REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, 0x00);
		usleep_range(1000, 2000);
		if ((ret & 0xc000) == 0xc000)
			return 0;
	}

	return -ETIMEDOUT;
}

static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
{
	struct mv88e6xxx_priv_state *ps;

	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
	if (mutex_trylock(&ps->ppu_mutex)) {
		struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;

		if (mv88e6xxx_ppu_enable(ds) == 0)
			ps->ppu_disabled = 0;
		mutex_unlock(&ps->ppu_mutex);
	}
}

static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
{
	struct mv88e6xxx_priv_state *ps = (void *)_ps;

	schedule_work(&ps->ppu_work);
}

static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->ppu_mutex);

	/* If the PHY polling unit is enabled, disable it so that
	 * we can access the PHY registers. If it was already
	 * disabled, cancel the timer that is going to re-enable
	 * it.
	 */
	if (!ps->ppu_disabled) {
		ret = mv88e6xxx_ppu_disable(ds);
		if (ret < 0) {
			mutex_unlock(&ps->ppu_mutex);
			return ret;
		}
		ps->ppu_disabled = 1;
	} else {
		del_timer(&ps->ppu_timer);
		ret = 0;
	}

	return ret;
}

static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	/* Schedule a timer to re-enable the PHY polling unit. */
	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
	mutex_unlock(&ps->ppu_mutex);
}

void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->ppu_mutex);
	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
	init_timer(&ps->ppu_timer);
	ps->ppu_timer.data = (unsigned long)ps;
	ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}

int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}

int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
			    int regnum, u16 val)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
#endif

void mv88e6xxx_poll_link(struct dsa_switch *ds)
{
	int i;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		struct net_device *dev;
		int uninitialized_var(port_status);
		int link;
		int speed;
		int duplex;
		int fc;

		dev = ds->ports[i];
		if (dev == NULL)
			continue;

		link = 0;
		if (dev->flags & IFF_UP) {
			port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00);
			if (port_status < 0)
				continue;

			link = !!(port_status & 0x0800);
		}

		if (!link) {
			if (netif_carrier_ok(dev)) {
				netdev_info(dev, "link down\n");
				netif_carrier_off(dev);
			}
			continue;
		}

		switch (port_status & 0x0300) {
		case 0x0000:
			speed = 10;
			break;
		case 0x0100:
			speed = 100;
			break;
		case 0x0200:
			speed = 1000;
			break;
		default:
			speed = -1;
			break;
		}
		duplex = (port_status & 0x0400) ? 1 : 0;
		fc = (port_status & 0x8000) ? 1 : 0;

		if (!netif_carrier_ok(dev)) {
			netdev_info(dev,
				    "link up, %d Mb/s, %s duplex, flow control %sabled\n",
				    speed,
				    duplex ? "full" : "half",
				    fc ? "en" : "dis");
			netif_carrier_on(dev);
		}
	}
}
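
/* Hardware statistics are read through the global stats operation register
 * (0x1d): a snapshot command (0xdc00 | port) captures the counters of one
 * port, the busy bit (0x8000) is polled until the operation completes, and
 * each captured counter is then read back with 0xcc00 | counter, the 32-bit
 * value arriving in registers 0x1e (high word) and 0x1f (low word).
 */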

static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
	int ret;
	int i;

	for (i = 0; i < 10; i++) {
		ret = REG_READ(REG_GLOBAL, 0x1d);
		if ((ret & 0x8000) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
	int ret;

	/* Snapshot the hardware statistics counters for this port. */
	REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port);

	/* Wait for the snapshotting to complete. */
	ret = mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return ret;

	return 0;
}

static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
{
	u32 _val;
	int ret;

	*val = 0;

	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat);
	if (ret < 0)
		return;

	ret = mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return;

	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e);
	if (ret < 0)
		return;

	_val = ret << 16;

	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f);
	if (ret < 0)
		return;

	*val = _val | ret;
}
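
/* Each entry names a counter, gives its width in bytes and the counter
 * number passed to mv88e6xxx_stats_read().  Entries with a register value
 * of 0x100 or above are not part of the stats unit; they are read directly
 * from the per-port registers at offset (reg - 0x100), see
 * _mv88e6xxx_get_ethtool_stats() below.
 */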

static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
	{ "in_good_octets", 8, 0x00, },
	{ "in_bad_octets", 4, 0x02, },
	{ "in_unicast", 4, 0x04, },
	{ "in_broadcasts", 4, 0x06, },
	{ "in_multicasts", 4, 0x07, },
	{ "in_pause", 4, 0x16, },
	{ "in_undersize", 4, 0x18, },
	{ "in_fragments", 4, 0x19, },
	{ "in_oversize", 4, 0x1a, },
	{ "in_jabber", 4, 0x1b, },
	{ "in_rx_error", 4, 0x1c, },
	{ "in_fcs_error", 4, 0x1d, },
	{ "out_octets", 8, 0x0e, },
	{ "out_unicast", 4, 0x10, },
	{ "out_broadcasts", 4, 0x13, },
	{ "out_multicasts", 4, 0x12, },
	{ "out_pause", 4, 0x15, },
	{ "excessive", 4, 0x11, },
	{ "collisions", 4, 0x1e, },
	{ "deferred", 4, 0x05, },
	{ "single", 4, 0x14, },
	{ "multiple", 4, 0x17, },
	{ "out_fcs_error", 4, 0x03, },
	{ "late", 4, 0x1f, },
	{ "hist_64bytes", 4, 0x08, },
	{ "hist_65_127bytes", 4, 0x09, },
	{ "hist_128_255bytes", 4, 0x0a, },
	{ "hist_256_511bytes", 4, 0x0b, },
	{ "hist_512_1023bytes", 4, 0x0c, },
	{ "hist_1024_max_bytes", 4, 0x0d, },
	/* Not all devices have the following counters */
	{ "sw_in_discards", 4, 0x110, },
	{ "sw_in_filtered", 2, 0x112, },
	{ "sw_out_filtered", 2, 0x113, },
};

static bool have_sw_in_discards(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case ID_6095: case ID_6161: case ID_6165:
	case ID_6171: case ID_6172: case ID_6176:
	case ID_6182: case ID_6185: case ID_6352:
		return true;
	default:
		return false;
	}
}

static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
				   int nr_stats,
				   struct mv88e6xxx_hw_stat *stats,
				   int port, uint8_t *data)
{
	int i;

	for (i = 0; i < nr_stats; i++) {
		memcpy(data + i * ETH_GSTRING_LEN,
		       stats[i].string, ETH_GSTRING_LEN);
	}
}

static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
					 int nr_stats,
					 struct mv88e6xxx_hw_stat *stats,
					 int port, uint64_t *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	mutex_lock(&ps->stats_mutex);

	ret = mv88e6xxx_stats_snapshot(ds, port);
	if (ret < 0) {
		mutex_unlock(&ps->stats_mutex);
		return;
	}

	/* Read each of the counters. */
	for (i = 0; i < nr_stats; i++) {
		struct mv88e6xxx_hw_stat *s = stats + i;
		u32 low;
		u32 high = 0;

		if (s->reg >= 0x100) {
			int ret;

			ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
						 s->reg - 0x100);
			if (ret < 0)
				goto error;
			low = ret;
			if (s->sizeof_stat == 4) {
				ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
							 s->reg - 0x100 + 1);
				if (ret < 0)
					goto error;
				high = ret;
			}
			data[i] = (((u64)high) << 16) | low;
			continue;
		}
		mv88e6xxx_stats_read(ds, s->reg, &low);
		if (s->sizeof_stat == 8)
			mv88e6xxx_stats_read(ds, s->reg + 1, &high);

		data[i] = (((u64)high) << 32) | low;
	}
error:
	mutex_unlock(&ps->stats_mutex);
}

/* All the statistics in the table */
void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
				       mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
				       mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
	if (have_sw_in_discards(ds))
		return ARRAY_SIZE(mv88e6xxx_hw_stats);
	return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
}

void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
				 int port, uint64_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
			mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
			mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
	return 32 * sizeof(u16);
}

void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
			struct ethtool_regs *regs, void *_p)
{
	u16 *p = _p;
	int i;

	regs->version = 0;

	memset(p, 0xff, 32 * sizeof(u16));

	for (i = 0; i < 32; i++) {
		int ret;

		ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
		if (ret >= 0)
			p[i] = ret;
	}
}

#ifdef CONFIG_NET_DSA_HWMON

int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int val;

	*temp = 0;

	mutex_lock(&ps->phy_mutex);

	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
	if (ret < 0)
		goto error;

	/* Enable temperature sensor */
	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
	if (ret < 0)
		goto error;

	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
	if (ret < 0)
		goto error;

	/* Wait for temperature to stabilize */
	usleep_range(10000, 12000);

	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
	if (val < 0) {
		ret = val;
		goto error;
	}

	/* Disable temperature sensor */
	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
	if (ret < 0)
		goto error;

	*temp = ((val & 0x1f) - 5) * 5;

error:
	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
	mutex_unlock(&ps->phy_mutex);
	return ret;
}
#endif /* CONFIG_NET_DSA_HWMON */

static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
{
	unsigned long timeout = jiffies + HZ / 10;

	while (time_before(jiffies, timeout)) {
		int ret;

		ret = REG_READ(reg, offset);
		if (!(ret & mask))
			return 0;

		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}

int mv88e6xxx_phy_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x18, 0x8000);
}

int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x0800);
}

int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x8000);
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
{
	unsigned long timeout = jiffies + HZ / 10;

	while (time_before(jiffies, timeout)) {
		int ret;

		ret = _mv88e6xxx_reg_read(ds, reg, offset);
		if (ret < 0)
			return ret;
		if (!(ret & mask))
			return 0;

		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, 0x0b, ATU_BUSY);
}
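
/* Indirect PHY access goes through the Global2 registers 0x18 (command) and
 * 0x19 (data), using the same 0x9800/0x9400 read/write opcodes as the
 * indirect switch register access at the top of this file.
 */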

/* Must be called with phy mutex held */
static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
					int regnum)
{
	int ret;

	REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum);

	ret = mv88e6xxx_phy_wait(ds);
	if (ret < 0)
		return ret;

	return REG_READ(REG_GLOBAL2, 0x19);
}

/* Must be called with phy mutex held */
static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
					 int regnum, u16 val)
{
	REG_WRITE(REG_GLOBAL2, 0x19, val);
	REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum);

	return mv88e6xxx_phy_wait(ds);
}

int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;

	mutex_lock(&ps->phy_mutex);

	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (reg < 0)
		goto out;

	e->eee_enabled = !!(reg & 0x0200);
	e->tx_lpi_enabled = !!(reg & 0x0100);

	reg = mv88e6xxx_reg_read(ds, REG_PORT(port), 0);
	if (reg < 0)
		goto out;

	e->eee_active = !!(reg & 0x0040);
	reg = 0;

out:
	mutex_unlock(&ps->phy_mutex);
	return reg;
}

int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
		      struct phy_device *phydev, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;
	int ret;

	mutex_lock(&ps->phy_mutex);

	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (ret < 0)
		goto out;

	reg = ret & ~0x0300;
	if (e->eee_enabled)
		reg |= 0x0200;
	if (e->tx_lpi_enabled)
		reg |= 0x0100;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
out:
	mutex_unlock(&ps->phy_mutex);

	return ret;
}

static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x0b, cmd);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_wait(ds);
}

static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
{
	int ret;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_cmd(ds, fid, ATU_CMD_FLUSH_NONSTATIC_FID);
}

static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg, ret = 0;
	u8 oldstate;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), 0x04);
	if (reg < 0) {
		ret = reg;
		goto abort;
	}

	oldstate = reg & PSTATE_MASK;
	if (oldstate != state) {
		/* Flush forwarding database if we're moving a port
		 * from Learning or Forwarding state to Disabled or
		 * Blocking or Listening state.
		 */
		if (oldstate >= PSTATE_LEARNING && state <= PSTATE_BLOCKING) {
			ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
			if (ret)
				goto abort;
		}
		reg = (reg & ~PSTATE_MASK) | state;
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x04, reg);
	}

abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid = ps->fid[port];
	u16 reg = fid << 12;

	if (dsa_is_cpu_port(ds, port))
		reg |= ds->phys_port_mask;
	else
		reg |= (ps->bridge_mask[fid] |
		       (1 << dsa_upstream_port(ds))) & ~(1 << port);

	return _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x06, reg);
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int port;
	u32 mask;
	int ret;

	mask = ds->phys_port_mask;
	while (mask) {
		port = __ffs(mask);
		mask &= ~(1 << port);
		if (ps->fid[port] != fid)
			continue;

		ret = _mv88e6xxx_update_port_config(ds, port);
		if (ret)
			return ret;
	}

	return _mv88e6xxx_flush_fid(ds, fid);
}

/* Bridge handling functions */
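
/* Bridging is modelled with the per-port FIDs (address database numbers)
 * already used above: every port starts out with a private FID, and ports
 * that are bridged together are moved to a shared FID so that they also
 * share an address database.  ps->fid_mask tracks which FIDs are still free.
 */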

int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret = 0;
	u32 nmask;
	int fid;

	/* If the bridge group is not empty, join that group.
	 * Otherwise create a new group.
	 */
	fid = ps->fid[port];
	nmask = br_port_mask & ~(1 << port);
	if (nmask)
		fid = ps->fid[__ffs(nmask)];

	nmask = ps->bridge_mask[fid] | (1 << port);
	if (nmask != br_port_mask) {
		netdev_err(ds->ports[port],
			   "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, nmask);
		return -EINVAL;
	}

	mutex_lock(&ps->smi_mutex);

	ps->bridge_mask[fid] = br_port_mask;

	if (fid != ps->fid[port]) {
		ps->fid_mask |= 1 << ps->fid[port];
		ps->fid[port] = fid;
		ret = _mv88e6xxx_update_bridge_config(ds, fid);
	}

	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid, newfid;
	int ret;

	fid = ps->fid[port];

	if (ps->bridge_mask[fid] != br_port_mask) {
		netdev_err(ds->ports[port],
			   "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, ps->bridge_mask[fid]);
		return -EINVAL;
	}

	/* If the port was the last port of a bridge, we are done.
	 * Otherwise assign a new fid to the port, and fix up
	 * the bridge configuration.
	 */
	if (br_port_mask == (1 << port))
		return 0;

	mutex_lock(&ps->smi_mutex);

	newfid = __ffs(ps->fid_mask);
	ps->fid[port] = newfid;
	ps->fid_mask &= ~(1 << newfid);
	ps->bridge_mask[fid] &= ~(1 << port);
	ps->bridge_mask[newfid] = 1 << port;

	ret = _mv88e6xxx_update_bridge_config(ds, fid);
	if (!ret)
		ret = _mv88e6xxx_update_bridge_config(ds, newfid);

	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = PSTATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = PSTATE_BLOCKING;
		break;
	case BR_STATE_LEARNING:
		stp_state = PSTATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = PSTATE_FORWARDING;
		break;
	}

	netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);

	/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
	 * so we can not update the port state directly but need to schedule it.
	 */
	ps->port_state[port] = stp_state;
	set_bit(port, &ps->port_state_update_mask);
	schedule_work(&ps->bridge_work);

	return 0;
}
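
/* FDB operations stage the MAC address in the three global address registers
 * (0x0d..0x0f) used by the ATU, and then issue an ATU load or get-next
 * command for the port's FID via _mv88e6xxx_atu_cmd().
 */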

static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
				  const unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x0d + i,
					   (addr[i * 2] << 8) | addr[i * 2 + 1]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x0d + i);
		if (ret < 0)
			return ret;
		addr[i * 2] = ret >> 8;
		addr[i * 2 + 1] = ret & 0xff;
	}

	return 0;
}

static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
				    const unsigned char *addr, int state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid = ps->fid[port];
	int ret;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = __mv88e6xxx_write_addr(ds, addr);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x0c,
				   (0x10 << port) | state);
	if (ret)
		return ret;

	ret = _mv88e6xxx_atu_cmd(ds, fid, ATU_CMD_LOAD_FID);

	return ret;
}

int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	int state = is_multicast_ether_addr(addr) ?
		FDB_STATE_MC_STATIC : FDB_STATE_STATIC;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, FDB_STATE_UNUSED);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
				    unsigned char *addr, bool *is_static)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid = ps->fid[port];
	int ret, state;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = __mv88e6xxx_write_addr(ds, addr);
	if (ret < 0)
		return ret;

	do {
		ret = _mv88e6xxx_atu_cmd(ds, fid, ATU_CMD_GETNEXT_FID);
		if (ret < 0)
			return ret;

		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x0c);
		if (ret < 0)
			return ret;
		state = ret & FDB_STATE_MASK;
		if (state == FDB_STATE_UNUSED)
			return -ENOENT;
	} while (!(((ret >> 4) & 0xff) & (1 << port)));

	ret = __mv88e6xxx_read_addr(ds, addr);
	if (ret < 0)
		return ret;

	*is_static = state == (is_multicast_ether_addr(addr) ?
			       FDB_STATE_MC_STATIC : FDB_STATE_STATIC);

	return 0;
}

/* get next entry for port */
int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
			       unsigned char *addr, bool *is_static)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static void mv88e6xxx_bridge_work(struct work_struct *work)
{
	struct mv88e6xxx_priv_state *ps;
	struct dsa_switch *ds;
	int port;

	ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
	ds = ((struct dsa_switch *)ps) - 1;

	while (ps->port_state_update_mask) {
		port = __ffs(ps->port_state_update_mask);
		clear_bit(port, &ps->port_state_update_mask);
		mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
	}
}

int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret, fid;

	mutex_lock(&ps->smi_mutex);

	/* Port Control 1: disable trunking, disable sending
	 * learning messages to this port.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x05, 0x0000);
	if (ret)
		goto abort;

	/* Port based VLAN map: give each port its own address
	 * database, allow the CPU port to talk to each of the 'real'
	 * ports, and allow each of the 'real' ports to only talk to
	 * the upstream port.
	 */
	fid = __ffs(ps->fid_mask);
	ps->fid[port] = fid;
	ps->fid_mask &= ~(1 << fid);

	if (!dsa_is_cpu_port(ds, port))
		ps->bridge_mask[fid] = 1 << port;

	ret = _mv88e6xxx_update_port_config(ds, port);
	if (ret)
		goto abort;

	/* Default VLAN ID and priority: don't set a default VLAN
	 * ID, and set the default packet priority to zero.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x07, 0x0000);
abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int mv88e6xxx_setup_common(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->smi_mutex);
	mutex_init(&ps->stats_mutex);
	mutex_init(&ps->phy_mutex);

	ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;

	ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;

	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);

	return 0;
}

int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
	unsigned long timeout;
	int ret;
	int i;

	/* Set all ports to the disabled state. */
	for (i = 0; i < ps->num_ports; i++) {
		ret = REG_READ(REG_PORT(i), 0x04);
		REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
	}

	/* Wait for transmit queues to drain. */
	usleep_range(2000, 4000);

	/* Reset the switch. Keep the PPU active if requested. The PPU
	 * needs to be active to support indirect phy register access
	 * through global registers 0x18 and 0x19.
	 */
	if (ppu_active)
		REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
	else
		REG_WRITE(REG_GLOBAL, 0x04, 0xc400);

	/* Wait up to one second for reset to complete. */
	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, 0x00);
		if ((ret & is_reset) == is_reset)
			break;
		usleep_range(1000, 2000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	return 0;
}

int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->phy_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
	if (ret < 0)
		goto error;
	ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
error:
	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
	mutex_unlock(&ps->phy_mutex);
	return ret;
}

int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
			     int reg, int val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->phy_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
	if (ret < 0)
		goto error;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
error:
	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
	mutex_unlock(&ps->phy_mutex);
	return ret;
}

static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	if (port >= 0 && port < ps->num_ports)
		return port;
	return -EINVAL;
}

int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->phy_mutex);
	ret = _mv88e6xxx_phy_read(ds, addr, regnum);
	mutex_unlock(&ps->phy_mutex);
	return ret;
}

int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->phy_mutex);
	ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
	mutex_unlock(&ps->phy_mutex);
	return ret;
}

int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->phy_mutex);
	ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
	mutex_unlock(&ps->phy_mutex);
	return ret;
}

int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
				 u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->phy_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
	mutex_unlock(&ps->phy_mutex);
	return ret;
}

static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	register_switch_driver(&mv88e6131_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
	register_switch_driver(&mv88e6123_61_65_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	register_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	register_switch_driver(&mv88e6171_switch_driver);
#endif
	return 0;
}
module_init(mv88e6xxx_init);

static void __exit mv88e6xxx_cleanup(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	unregister_switch_driver(&mv88e6171_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	/* Registered in mv88e6xxx_init() above, so unregister it here too. */
	unregister_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
	unregister_switch_driver(&mv88e6123_61_65_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	unregister_switch_driver(&mv88e6131_switch_driver);
#endif
}
module_exit(mv88e6xxx_cleanup);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
MODULE_LICENSE("GPL");