/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
21 #include "xgene_enet_main.h"
22 #include "xgene_enet_hw.h"
23 #include "xgene_enet_sgmac.h"
24 #include "xgene_enet_xgmac.h"
26 static void xgene_enet_wr_csr(struct xgene_enet_pdata
*p
, u32 offset
, u32 val
)
28 iowrite32(val
, p
->eth_csr_addr
+ offset
);
31 static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata
*p
, u32 offset
,
34 iowrite32(val
, p
->base_addr
+ offset
);
37 static void xgene_enet_wr_ring_if(struct xgene_enet_pdata
*p
,
40 iowrite32(val
, p
->eth_ring_if_addr
+ offset
);
43 static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata
*p
,
46 iowrite32(val
, p
->eth_diag_csr_addr
+ offset
);
49 static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata
*pdata
,
52 void __iomem
*addr
= pdata
->mcx_mac_csr_addr
+ offset
;
57 static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl
*ctl
,
58 u32 wr_addr
, u32 wr_data
)
62 iowrite32(wr_addr
, ctl
->addr
);
63 iowrite32(wr_data
, ctl
->ctl
);
64 iowrite32(XGENE_ENET_WR_CMD
, ctl
->cmd
);
66 /* wait for write command to complete */
67 for (i
= 0; i
< 10; i
++) {
68 if (ioread32(ctl
->cmd_done
)) {
69 iowrite32(0, ctl
->cmd
);
78 static void xgene_enet_wr_mac(struct xgene_enet_pdata
*p
,
79 u32 wr_addr
, u32 wr_data
)
81 struct xgene_indirect_ctl ctl
= {
82 .addr
= p
->mcx_mac_addr
+ MAC_ADDR_REG_OFFSET
,
83 .ctl
= p
->mcx_mac_addr
+ MAC_WRITE_REG_OFFSET
,
84 .cmd
= p
->mcx_mac_addr
+ MAC_COMMAND_REG_OFFSET
,
85 .cmd_done
= p
->mcx_mac_addr
+ MAC_COMMAND_DONE_REG_OFFSET
88 if (!xgene_enet_wr_indirect(&ctl
, wr_addr
, wr_data
))
89 netdev_err(p
->ndev
, "mac write failed, addr: %04x\n", wr_addr
);
92 static u32
xgene_enet_rd_csr(struct xgene_enet_pdata
*p
, u32 offset
)
94 return ioread32(p
->eth_csr_addr
+ offset
);
97 static u32
xgene_enet_rd_diag_csr(struct xgene_enet_pdata
*p
, u32 offset
)
99 return ioread32(p
->eth_diag_csr_addr
+ offset
);
102 static u32
xgene_enet_rd_mcx_csr(struct xgene_enet_pdata
*p
, u32 offset
)
104 return ioread32(p
->mcx_mac_csr_addr
+ offset
);
107 static u32
xgene_enet_rd_indirect(struct xgene_indirect_ctl
*ctl
, u32 rd_addr
)
112 iowrite32(rd_addr
, ctl
->addr
);
113 iowrite32(XGENE_ENET_RD_CMD
, ctl
->cmd
);
115 /* wait for read command to complete */
116 for (i
= 0; i
< 10; i
++) {
117 if (ioread32(ctl
->cmd_done
)) {
118 rd_data
= ioread32(ctl
->ctl
);
119 iowrite32(0, ctl
->cmd
);
126 pr_err("%s: mac read failed, addr: %04x\n", __func__
, rd_addr
);
131 static u32
xgene_enet_rd_mac(struct xgene_enet_pdata
*p
, u32 rd_addr
)
133 struct xgene_indirect_ctl ctl
= {
134 .addr
= p
->mcx_mac_addr
+ MAC_ADDR_REG_OFFSET
,
135 .ctl
= p
->mcx_mac_addr
+ MAC_READ_REG_OFFSET
,
136 .cmd
= p
->mcx_mac_addr
+ MAC_COMMAND_REG_OFFSET
,
137 .cmd_done
= p
->mcx_mac_addr
+ MAC_COMMAND_DONE_REG_OFFSET
140 return xgene_enet_rd_indirect(&ctl
, rd_addr
);
143 static int xgene_enet_ecc_init(struct xgene_enet_pdata
*p
)
145 struct net_device
*ndev
= p
->ndev
;
149 shutdown
= xgene_enet_rd_diag_csr(p
, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR
);
150 data
= xgene_enet_rd_diag_csr(p
, ENET_BLOCK_MEM_RDY_ADDR
);
152 if (!shutdown
&& data
== ~0U) {
153 netdev_dbg(ndev
, "+ ecc_init done, skipping\n");
157 xgene_enet_wr_diag_csr(p
, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR
, 0);
159 usleep_range(100, 110);
160 data
= xgene_enet_rd_diag_csr(p
, ENET_BLOCK_MEM_RDY_ADDR
);
165 netdev_err(ndev
, "Failed to release memory from shutdown\n");
169 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata
*p
)
173 val
= (p
->enet_id
== XGENE_ENET1
) ? 0xffffffff : 0;
174 xgene_enet_wr_ring_if(p
, ENET_CFGSSQMIWQASSOC_ADDR
, val
);
175 xgene_enet_wr_ring_if(p
, ENET_CFGSSQMIFPQASSOC_ADDR
, val
);
178 static void xgene_mii_phy_write(struct xgene_enet_pdata
*p
, u8 phy_id
,
181 u32 addr
, wr_data
, done
;
184 addr
= PHY_ADDR(phy_id
) | REG_ADDR(reg
);
185 xgene_enet_wr_mac(p
, MII_MGMT_ADDRESS_ADDR
, addr
);
187 wr_data
= PHY_CONTROL(data
);
188 xgene_enet_wr_mac(p
, MII_MGMT_CONTROL_ADDR
, wr_data
);
190 for (i
= 0; i
< 10; i
++) {
191 done
= xgene_enet_rd_mac(p
, MII_MGMT_INDICATORS_ADDR
);
192 if (!(done
& BUSY_MASK
))
194 usleep_range(10, 20);
197 netdev_err(p
->ndev
, "MII_MGMT write failed\n");
200 static u32
xgene_mii_phy_read(struct xgene_enet_pdata
*p
, u8 phy_id
, u32 reg
)
202 u32 addr
, data
, done
;
205 addr
= PHY_ADDR(phy_id
) | REG_ADDR(reg
);
206 xgene_enet_wr_mac(p
, MII_MGMT_ADDRESS_ADDR
, addr
);
207 xgene_enet_wr_mac(p
, MII_MGMT_COMMAND_ADDR
, READ_CYCLE_MASK
);
209 for (i
= 0; i
< 10; i
++) {
210 done
= xgene_enet_rd_mac(p
, MII_MGMT_INDICATORS_ADDR
);
211 if (!(done
& BUSY_MASK
)) {
212 data
= xgene_enet_rd_mac(p
, MII_MGMT_STATUS_ADDR
);
213 xgene_enet_wr_mac(p
, MII_MGMT_COMMAND_ADDR
, 0);
217 usleep_range(10, 20);
220 netdev_err(p
->ndev
, "MII_MGMT read failed\n");
225 static void xgene_sgmac_reset(struct xgene_enet_pdata
*p
)
227 xgene_enet_wr_mac(p
, MAC_CONFIG_1_ADDR
, SOFT_RESET1
);
228 xgene_enet_wr_mac(p
, MAC_CONFIG_1_ADDR
, 0);
231 static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata
*p
)
234 u8
*dev_addr
= p
->ndev
->dev_addr
;
236 addr0
= (dev_addr
[3] << 24) | (dev_addr
[2] << 16) |
237 (dev_addr
[1] << 8) | dev_addr
[0];
238 xgene_enet_wr_mac(p
, STATION_ADDR0_ADDR
, addr0
);
240 addr1
= xgene_enet_rd_mac(p
, STATION_ADDR1_ADDR
);
241 addr1
|= (dev_addr
[5] << 24) | (dev_addr
[4] << 16);
242 xgene_enet_wr_mac(p
, STATION_ADDR1_ADDR
, addr1
);
245 static u32
xgene_enet_link_status(struct xgene_enet_pdata
*p
)
249 data
= xgene_mii_phy_read(p
, INT_PHY_ADDR
,
250 SGMII_BASE_PAGE_ABILITY_ADDR
>> 2);
252 if (LINK_SPEED(data
) == PHY_SPEED_1000
)
253 p
->phy_speed
= SPEED_1000
;
254 else if (LINK_SPEED(data
) == PHY_SPEED_100
)
255 p
->phy_speed
= SPEED_100
;
257 p
->phy_speed
= SPEED_10
;
259 return data
& LINK_UP
;
262 static void xgene_sgmii_configure(struct xgene_enet_pdata
*p
)
264 xgene_mii_phy_write(p
, INT_PHY_ADDR
, SGMII_TBI_CONTROL_ADDR
>> 2,
266 xgene_mii_phy_write(p
, INT_PHY_ADDR
, SGMII_CONTROL_ADDR
>> 2, 0x9000);
267 xgene_mii_phy_write(p
, INT_PHY_ADDR
, SGMII_TBI_CONTROL_ADDR
>> 2, 0);
270 static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata
*p
)
272 xgene_mii_phy_write(p
, INT_PHY_ADDR
, SGMII_TBI_CONTROL_ADDR
>> 2,
274 xgene_mii_phy_write(p
, INT_PHY_ADDR
, SGMII_TBI_CONTROL_ADDR
>> 2, 0);
277 static void xgene_sgmii_reset(struct xgene_enet_pdata
*p
)
281 if (p
->phy_speed
== SPEED_UNKNOWN
)
284 value
= xgene_mii_phy_read(p
, INT_PHY_ADDR
,
285 SGMII_BASE_PAGE_ABILITY_ADDR
>> 2);
286 if (!(value
& LINK_UP
))
287 xgene_sgmii_tbi_control_reset(p
);
290 static void xgene_sgmac_set_speed(struct xgene_enet_pdata
*p
)
292 u32 icm0_addr
, icm2_addr
, debug_addr
;
293 u32 icm0
, icm2
, intf_ctl
;
296 xgene_sgmii_reset(p
);
298 if (p
->enet_id
== XGENE_ENET1
) {
299 icm0_addr
= ICM_CONFIG0_REG_0_ADDR
+ p
->port_id
* OFFSET_8
;
300 icm2_addr
= ICM_CONFIG2_REG_0_ADDR
+ p
->port_id
* OFFSET_4
;
301 debug_addr
= DEBUG_REG_ADDR
;
303 icm0_addr
= XG_MCX_ICM_CONFIG0_REG_0_ADDR
;
304 icm2_addr
= XG_MCX_ICM_CONFIG2_REG_0_ADDR
;
305 debug_addr
= XG_DEBUG_REG_ADDR
;
308 icm0
= xgene_enet_rd_mcx_csr(p
, icm0_addr
);
309 icm2
= xgene_enet_rd_mcx_csr(p
, icm2_addr
);
310 mc2
= xgene_enet_rd_mac(p
, MAC_CONFIG_2_ADDR
);
311 intf_ctl
= xgene_enet_rd_mac(p
, INTERFACE_CONTROL_ADDR
);
313 switch (p
->phy_speed
) {
315 ENET_INTERFACE_MODE2_SET(&mc2
, 1);
316 intf_ctl
&= ~(ENET_LHD_MODE
| ENET_GHD_MODE
);
317 CFG_MACMODE_SET(&icm0
, 0);
318 CFG_WAITASYNCRD_SET(&icm2
, 500);
321 ENET_INTERFACE_MODE2_SET(&mc2
, 1);
322 intf_ctl
&= ~ENET_GHD_MODE
;
323 intf_ctl
|= ENET_LHD_MODE
;
324 CFG_MACMODE_SET(&icm0
, 1);
325 CFG_WAITASYNCRD_SET(&icm2
, 80);
328 ENET_INTERFACE_MODE2_SET(&mc2
, 2);
329 intf_ctl
&= ~ENET_LHD_MODE
;
330 intf_ctl
|= ENET_GHD_MODE
;
331 CFG_MACMODE_SET(&icm0
, 2);
332 CFG_WAITASYNCRD_SET(&icm2
, 16);
333 value
= xgene_enet_rd_csr(p
, debug_addr
);
334 value
|= CFG_BYPASS_UNISEC_TX
| CFG_BYPASS_UNISEC_RX
;
335 xgene_enet_wr_csr(p
, debug_addr
, value
);
339 mc2
|= FULL_DUPLEX2
| PAD_CRC
;
340 xgene_enet_wr_mac(p
, MAC_CONFIG_2_ADDR
, mc2
);
341 xgene_enet_wr_mac(p
, INTERFACE_CONTROL_ADDR
, intf_ctl
);
342 xgene_enet_wr_mcx_csr(p
, icm0_addr
, icm0
);
343 xgene_enet_wr_mcx_csr(p
, icm2_addr
, icm2
);
346 static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata
*p
)
350 xgene_sgmii_configure(p
);
353 data
= xgene_mii_phy_read(p
, INT_PHY_ADDR
,
354 SGMII_STATUS_ADDR
>> 2);
355 if ((data
& AUTO_NEG_COMPLETE
) && (data
& LINK_STATUS
))
357 usleep_range(1000, 2000);
359 if (!(data
& AUTO_NEG_COMPLETE
) || !(data
& LINK_STATUS
))
360 netdev_err(p
->ndev
, "Auto-negotiation failed\n");
363 static void xgene_sgmac_init(struct xgene_enet_pdata
*p
)
365 u32 enet_spare_cfg_reg
, rsif_config_reg
;
366 u32 cfg_bypass_reg
, rx_dv_gate_reg
;
369 xgene_sgmac_reset(p
);
370 xgene_sgmii_enable_autoneg(p
);
371 xgene_sgmac_set_speed(p
);
372 xgene_sgmac_set_mac_addr(p
);
374 if (p
->enet_id
== XGENE_ENET1
) {
375 enet_spare_cfg_reg
= ENET_SPARE_CFG_REG_ADDR
;
376 rsif_config_reg
= RSIF_CONFIG_REG_ADDR
;
377 cfg_bypass_reg
= CFG_BYPASS_ADDR
;
378 offset
= p
->port_id
* OFFSET_4
;
379 rx_dv_gate_reg
= SG_RX_DV_GATE_REG_0_ADDR
+ offset
;
381 enet_spare_cfg_reg
= XG_ENET_SPARE_CFG_REG_ADDR
;
382 rsif_config_reg
= XG_RSIF_CONFIG_REG_ADDR
;
383 cfg_bypass_reg
= XG_CFG_BYPASS_ADDR
;
384 rx_dv_gate_reg
= XG_MCX_RX_DV_GATE_REG_0_ADDR
;
387 data
= xgene_enet_rd_csr(p
, enet_spare_cfg_reg
);
388 data
|= MPA_IDLE_WITH_QMI_EMPTY
;
389 xgene_enet_wr_csr(p
, enet_spare_cfg_reg
, data
);
391 /* Adjust MDC clock frequency */
392 data
= xgene_enet_rd_mac(p
, MII_MGMT_CONFIG_ADDR
);
393 MGMT_CLOCK_SEL_SET(&data
, 7);
394 xgene_enet_wr_mac(p
, MII_MGMT_CONFIG_ADDR
, data
);
396 /* Enable drop if bufpool not available */
397 data
= xgene_enet_rd_csr(p
, rsif_config_reg
);
398 data
|= CFG_RSIF_FPBUFF_TIMEOUT_EN
;
399 xgene_enet_wr_csr(p
, rsif_config_reg
, data
);
401 /* Bypass traffic gating */
402 xgene_enet_wr_csr(p
, XG_ENET_SPARE_CFG_REG_1_ADDR
, 0x84);
403 xgene_enet_wr_csr(p
, cfg_bypass_reg
, RESUME_TX
);
404 xgene_enet_wr_mcx_csr(p
, rx_dv_gate_reg
, RESUME_RX0
);
407 static void xgene_sgmac_rxtx(struct xgene_enet_pdata
*p
, u32 bits
, bool set
)
411 data
= xgene_enet_rd_mac(p
, MAC_CONFIG_1_ADDR
);
418 xgene_enet_wr_mac(p
, MAC_CONFIG_1_ADDR
, data
);
421 static void xgene_sgmac_rx_enable(struct xgene_enet_pdata
*p
)
423 xgene_sgmac_rxtx(p
, RX_EN
, true);
426 static void xgene_sgmac_tx_enable(struct xgene_enet_pdata
*p
)
428 xgene_sgmac_rxtx(p
, TX_EN
, true);
431 static void xgene_sgmac_rx_disable(struct xgene_enet_pdata
*p
)
433 xgene_sgmac_rxtx(p
, RX_EN
, false);
436 static void xgene_sgmac_tx_disable(struct xgene_enet_pdata
*p
)
438 xgene_sgmac_rxtx(p
, TX_EN
, false);
441 static int xgene_enet_reset(struct xgene_enet_pdata
*p
)
443 struct device
*dev
= &p
->pdev
->dev
;
445 if (!xgene_ring_mgr_init(p
))
448 if (p
->enet_id
== XGENE_ENET2
)
449 xgene_enet_wr_clkrst_csr(p
, XGENET_CONFIG_REG_ADDR
, SGMII_EN
);
452 if (!IS_ERR(p
->clk
)) {
453 clk_prepare_enable(p
->clk
);
455 clk_disable_unprepare(p
->clk
);
457 clk_prepare_enable(p
->clk
);
462 if (acpi_has_method(ACPI_HANDLE(&p
->pdev
->dev
), "_RST"))
463 acpi_evaluate_object(ACPI_HANDLE(&p
->pdev
->dev
),
465 else if (acpi_has_method(ACPI_HANDLE(&p
->pdev
->dev
), "_INI"))
466 acpi_evaluate_object(ACPI_HANDLE(&p
->pdev
->dev
),
472 xgene_enet_ecc_init(p
);
473 xgene_enet_config_ring_if_assoc(p
);
479 static void xgene_enet_cle_bypass(struct xgene_enet_pdata
*p
,
480 u32 dst_ring_num
, u16 bufpool_id
)
483 u32 cle_bypass_reg0
, cle_bypass_reg1
;
484 u32 offset
= p
->port_id
* MAC_OFFSET
;
486 if (p
->enet_id
== XGENE_ENET1
) {
487 cle_bypass_reg0
= CLE_BYPASS_REG0_0_ADDR
;
488 cle_bypass_reg1
= CLE_BYPASS_REG1_0_ADDR
;
490 cle_bypass_reg0
= XCLE_BYPASS_REG0_ADDR
;
491 cle_bypass_reg1
= XCLE_BYPASS_REG1_ADDR
;
494 data
= CFG_CLE_BYPASS_EN0
;
495 xgene_enet_wr_csr(p
, cle_bypass_reg0
+ offset
, data
);
497 fpsel
= xgene_enet_ring_bufnum(bufpool_id
) - 0x20;
498 data
= CFG_CLE_DSTQID0(dst_ring_num
) | CFG_CLE_FPSEL0(fpsel
);
499 xgene_enet_wr_csr(p
, cle_bypass_reg1
+ offset
, data
);
502 static void xgene_enet_clear(struct xgene_enet_pdata
*pdata
,
503 struct xgene_enet_desc_ring
*ring
)
507 val
= xgene_enet_ring_bufnum(ring
->id
);
509 if (xgene_enet_is_bufpool(ring
->id
)) {
510 addr
= ENET_CFGSSQMIFPRESET_ADDR
;
511 data
= BIT(val
- 0x20);
513 addr
= ENET_CFGSSQMIWQRESET_ADDR
;
517 xgene_enet_wr_ring_if(pdata
, addr
, data
);
520 static void xgene_enet_shutdown(struct xgene_enet_pdata
*p
)
522 struct device
*dev
= &p
->pdev
->dev
;
523 struct xgene_enet_desc_ring
*ring
;
528 for (i
= 0; i
< p
->rxq_cnt
; i
++) {
529 ring
= p
->rx_ring
[i
]->buf_pool
;
531 val
= xgene_enet_ring_bufnum(ring
->id
);
532 pb
|= BIT(val
- 0x20);
534 xgene_enet_wr_ring_if(p
, ENET_CFGSSQMIFPRESET_ADDR
, pb
);
537 for (i
= 0; i
< p
->txq_cnt
; i
++) {
538 ring
= p
->tx_ring
[i
];
540 val
= xgene_enet_ring_bufnum(ring
->id
);
543 xgene_enet_wr_ring_if(p
, ENET_CFGSSQMIWQRESET_ADDR
, pb
);
547 clk_disable_unprepare(p
->clk
);
551 static void xgene_enet_link_state(struct work_struct
*work
)
553 struct xgene_enet_pdata
*p
= container_of(to_delayed_work(work
),
554 struct xgene_enet_pdata
, link_work
);
555 struct net_device
*ndev
= p
->ndev
;
556 u32 link
, poll_interval
;
558 link
= xgene_enet_link_status(p
);
560 if (!netif_carrier_ok(ndev
)) {
561 netif_carrier_on(ndev
);
562 xgene_sgmac_set_speed(p
);
563 xgene_sgmac_rx_enable(p
);
564 xgene_sgmac_tx_enable(p
);
565 netdev_info(ndev
, "Link is Up - %dMbps\n",
568 poll_interval
= PHY_POLL_LINK_ON
;
570 if (netif_carrier_ok(ndev
)) {
571 xgene_sgmac_rx_disable(p
);
572 xgene_sgmac_tx_disable(p
);
573 netif_carrier_off(ndev
);
574 netdev_info(ndev
, "Link is Down\n");
576 poll_interval
= PHY_POLL_LINK_OFF
;
579 schedule_delayed_work(&p
->link_work
, poll_interval
);
582 const struct xgene_mac_ops xgene_sgmac_ops
= {
583 .init
= xgene_sgmac_init
,
584 .reset
= xgene_sgmac_reset
,
585 .rx_enable
= xgene_sgmac_rx_enable
,
586 .tx_enable
= xgene_sgmac_tx_enable
,
587 .rx_disable
= xgene_sgmac_rx_disable
,
588 .tx_disable
= xgene_sgmac_tx_disable
,
589 .set_speed
= xgene_sgmac_set_speed
,
590 .set_mac_addr
= xgene_sgmac_set_mac_addr
,
591 .link_state
= xgene_enet_link_state
594 const struct xgene_port_ops xgene_sgport_ops
= {
595 .reset
= xgene_enet_reset
,
596 .clear
= xgene_enet_clear
,
597 .cle_bypass
= xgene_enet_cle_bypass
,
598 .shutdown
= xgene_enet_shutdown