// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
			     const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy);

#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}

u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
		      u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}

void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
				u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[cpu] + offset);
}

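/* Illustrative sketch (not part of the driver): the pairing described
 * in the comment above means a "global" register such as
 * MVPP2_RXQ_THRESH_REG must be written through the same CPU window
 * that selected the queue via MVPP2_RXQ_NUM_REG, e.g.:
 *
 *	int cpu = get_cpu();
 *
 *	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
 *	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
 *	put_cpu();
 *
 * mvpp2_rx_pkts_coal_set() further down follows exactly this pattern.
 */
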
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

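/* Worked example (illustrative): assuming MVPP2_MAX_TCONT is 16 and
 * MVPP2_MAX_TXQ is 8 (values defined in mvpp2.h), logical txq 2 of
 * port 1 maps to physical TXQ (16 + 1) * 8 + 2 = 138, and the egress
 * port number of port 1 is 16 + 1 = 17.
 */
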
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	int cpu = get_cpu();

	*dma_addr = mvpp2_percpu_read(priv, cpu,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num  = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num  = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num  = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	int cpu = get_cpu();

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_percpu_write_relaxed(port->priv, cpu,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_percpu_write_relaxed(port->priv, cpu,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_percpu_write_relaxed(port->priv, cpu,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	put_cpu();
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool >= MVPP2_BM_POOLS_NUM) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[pool].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		/* Update L4 checksum when jumbo enable/disable on port */
		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
			dev->hw_features &= ~(NETIF_F_IP_CSUM |
					      NETIF_F_IPV6_CSUM);
		} else {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		}
	}

	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current CPU's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_percpu_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

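/* Illustrative note: the per-CPU mask/unmask helpers above are meant
 * to run once per CPU. Elsewhere in this driver (not shown in this
 * excerpt) they are invoked through on_each_cpu(), e.g. (sketch):
 *
 *	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 *	...
 *	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
 *
 * which is what makes the smp_processor_id() usage in them safe.
 */
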
/* Port configuration routines */

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	/* XPCS */
	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	/* MPCS */
	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
		 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
	val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GKR:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	enum phy_mode mode;
	int ret;

	if (!port->comphy)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		mode = PHY_MODE_SGMII;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		mode = PHY_MODE_2500SGMII;
		break;
	case PHY_INTERFACE_MODE_10GKR:
		mode = PHY_MODE_10GKR;
		break;
	default:
		return -EINVAL;
	}

	ret = phy_set_mode(port->comphy, mode);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN |
		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);

		/* Disable & reset should be done separately */
		val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val &= ~(MVPP2_GMAC_PORT_EN_MASK);
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

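/* Example (illustrative): counters flagged reg_is_64b span two 32-bit
 * registers; the low word is read first, then the high word at
 * offset + 4. Reading the first entry of mvpp2_ethtool_regs[] below
 * (the 64-bit "good_octets_received" counter) would look like:
 *
 *	u64 octets = mvpp2_read_count(port, &mvpp2_ethtool_regs[0]);
 */
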
/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
	}
}

static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);
	u64 *pstats;
	int i;

	mutex_lock(&port->gather_stats_lock);

	pstats = port->ethtool_stats;
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}

static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
	mutex_unlock(&port->gather_stats_lock);
}

static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvpp2_ethtool_regs);

	return -EOPNOTSUPP;
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;
	unsigned int i;

	/* Read the GOP statistics to reset the hardware counters */
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
		mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			   MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

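/* Usage sketch: after the Rx path has processed rx_done descriptors
 * and refilled as many buffers, both counters are updated in one shot:
 *
 *	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
 *
 * mvpp2_rxq_drop_pkts() further down uses the same call with
 * rx_received.
 */
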
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read_relaxed(priv,
					     MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;

		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
			return -ENOMEM;
	}
	return 0;
}

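/* Usage sketch (mirroring the driver's xmit path, which is not part of
 * this excerpt): the check runs before descriptors are filled in, and
 * the pending count is pushed to HW once they are:
 *
 *	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags))
 *		goto drop;	// hypothetical error label
 *	...
 *	mvpp2_aggr_txq_pend_desc_add(port, frags);
 */
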
/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;
	int cpu = smp_processor_id();

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor could have been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}

/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == htons(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}

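/* Usage sketch for a TCP-over-IPv4 skb (illustrative only; the real
 * caller, not shown in this excerpt, derives these values from the skb
 * headers, passing the IPv4 header length in 32-bit words):
 *
 *	u32 cmd = mvpp2_txq_desc_csum(skb_network_offset(skb),
 *				      htons(ETH_P_IP),
 *				      ip_hdr(skb)->ihl,
 *				      IPPROTO_TCP);
 *
 * The result is then combined into the Tx descriptor command field via
 * mvpp2_txdesc_cmd_set().
 */
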
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
					MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_percpu_read(port->priv, smp_processor_id(),
				  MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32	val, size, mtu;
	int	txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	int cpu = get_cpu();

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}

/* For some reason in the LSP this is done on each CPU. Why ? */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	int cpu = get_cpu();
	u32 val;

	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);

	put_cpu();
}

static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

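/* Worked example: with a hypothetical 250 MHz tclk, a 100 usec
 * coalescing delay converts to 250000000 * 100 / 1000000 = 25000
 * cycles, and 25000 cycles convert back to exactly 100 usec. Values
 * that would overflow u32 are clamped to U32_MAX by both helpers.
 */
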
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}

static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
					 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

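/* Worked example: the cause register is a bitmap of queues needing
 * service, and fls() picks the highest set bit. For cause = 0x5
 * (queues 0 and 2), fls(0x5) - 1 = 2, so queue 2 is returned first;
 * the caller clears bit 2 and the next lookup returns queue 0.
 * mvpp2_tx_done() below iterates exactly this way.
 */
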
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  int cpu)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}
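/* Note on the PPv2.2 branch above: descriptor rings may live above 4 GB, so
 * the register holds the DMA address right-shifted by
 * MVPP22_AGGR_TXQ_DESC_ADDR_OFFS instead of the raw 32-bit value used on
 * PPv2.1. The shift relies on the coherent allocation being sufficiently
 * aligned, so no address bits are lost.
 */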
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;
	int cpu;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	int cpu;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_dma);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}
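/* Worked example for the prefetch buffer layout above (illustrative):
 * 16 descriptors are reserved per TXQ, so the base index is
 * port->id * MVPP2_MAX_TXQ * 16 + txq->log_id * 16. Assuming
 * MVPP2_MAX_TXQ == 8, logical queue 3 of port 1 starts at
 * 1 * 8 * 16 + 3 * 16 = 176.
 */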
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}
/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}
/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}
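/* The handler above follows the usual NAPI pattern: the queue vector's
 * interrupt is masked in hard-IRQ context and the real work is deferred to
 * mvpp2_poll(), which re-enables the interrupt via
 * mvpp2_qvec_interrupt_enable() once the budget is no longer exhausted.
 */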
/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

	mvpp22_gop_mask_irq(port);

	if (port->gop_id == 0 &&
	    port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
		   port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
		   port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	if (port->phylink) {
		phylink_mac_change(port->phylink, link);
		goto handled;
	}

	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}
static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}
static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
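/* Tx-done deferral chain when the port has no Tx IRQs: mvpp2_timer_set()
 * arms a pinned hrtimer, whose callback above only schedules the
 * tx_done_tasklet; the tasklet runs mvpp2_tx_proc_cb(), which reaps
 * completed descriptors and re-arms the timer if work remains.
 */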
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
	char *err_str = NULL;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		err_str = "crc";
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		err_str = "overrun";
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		err_str = "resource";
		break;
	}
	if (err_str && net_ratelimit())
		netdev_err(port->dev,
			   "bad rx status %08x (%s error), size=%zu\n",
			   status, err_str, sz);
}
/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}
/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;
		__be16 l3_proto = vlan_get_protocol(skb);

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   l3_proto, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
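/* Example of the decision above: for a CHECKSUM_PARTIAL IPv4/TCP skb with a
 * 20-byte IP header, ip_hdr_len is ihl == 5 (in 32-bit words) and l4_proto
 * is IPPROTO_TCP; mvpp2_txq_desc_csum() turns these into the Tx descriptor
 * command bits. Anything the hardware cannot checksum falls through to
 * MVPP2_TXD_L4_CSUM_NOT, with or without IP checksumming disabled.
 */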
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}
static void tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			      struct mvpp2_tx_desc *desc)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;
cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			/* last descriptor in SKB */
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}
static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
				      tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
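/* Worked example for the segmentation loop above: a TSO skb of 7294 bytes
 * with a 54-byte header (14 Ethernet + 20 IP + 20 TCP) carries 7240 bytes of
 * payload; with gso_size == 1448 that is exactly five segments, each
 * consuming one header descriptor (backed by the per-CPU tso_headers
 * buffer) plus one or more data descriptors.
 */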
/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	int cpu = smp_processor_id();

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_percpu_write(port->priv, cpu,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;

		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
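/* Example of the cause decoding above: cause_rx_tx == 0x00030005 means
 * Tx-done work is pending on Tx queues 0 and 1 (bits 16 and 17) and packets
 * were received on Rx queues 0 and 2 (bits 0 and 2), relative to this queue
 * vector's first_rxq.
 */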
static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
	u32 ctrl3;

	/* comphy reconfiguration */
	mvpp22_comphy_init(port);

	/* gop reconfiguration */
	mvpp22_gop_init(port);

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
		    port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
	}

	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	int i;

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22)
		mvpp22_mode_reconfigure(port);

	if (port->phylink) {
		netif_carrier_off(port->dev);
		phylink_start(port->phylink);
	} else {
		/* Phylink isn't used as of now for ACPI, so the MAC has to be
		 * configured manually when the interface is started. This will
		 * be removed as soon as the phylink ACPI support lands in.
		 */
		struct phylink_link_state state = {
			.interface = port->phy_interface,
		};
		mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
		mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
				  NULL);
	}

	netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	int i;

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	if (port->phylink)
		phylink_stop(port->phylink);
	phy_power_off(port->comphy);
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
		new_rx_pending = MVPP2_MAX_RXD_MAX;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
		new_tx_pending = MVPP2_MAX_TXD_MAX;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	/* The Tx ring size cannot be smaller than the minimum number of
	 * descriptors needed for TSO.
	 */
	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
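/* Example of the rounding above: a requested Rx ring of 1000 descriptors is
 * not 16-aligned and becomes ALIGN(1000, 16) = 1008; a Tx ring of 50 becomes
 * ALIGN(50, 32) = 64, and is then raised further if it still falls below
 * MVPP2_MAX_SKB_DESCS, the minimum needed for a worst-case TSO frame.
 */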
static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_affinity_hint(qv->irq,
					      cpumask_of(qv->sw_thread_id));
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		free_irq(qv->irq, qv);
	}

	return err;
}
static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}
static bool mvpp22_rss_is_supported(void)
{
	return queue_mode == MVPP2_QDIST_MULTI_MODE;
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;

	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* Phylink isn't supported yet in ACPI mode */
	if (port->of_node) {
		err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}

	if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);

		/* By default, the link is down */
		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->link_irq = 0;
	}

	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ");
		err = -ENOENT;
		goto err_free_irq;
	}

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	if (port->link_irq)
		free_irq(port->link_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
			tasklet_kill(&port_pcpu->tx_done_tasklet);
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	return 0;
}
static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int ret;

	netdev_hw_addr_list_for_each(ha, list) {
		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
		if (ret)
			return ret;
	}

	return 0;
}
static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{
	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		mvpp2_prs_vid_enable_filtering(port);
	else
		mvpp2_prs_vid_disable_filtering(port);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_UNI_CAST, enable);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_MULTI_CAST, enable);
}
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Clear the whole UC and MC list */
	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (err) {
		/* Reconfigure parser to accept the original MAC address */
		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		netdev_err(dev, "failed to change MAC address\n");
	}
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto log_error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto log_error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;
log_error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;
		unsigned int start;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_mii_ioctl(port->phylink, ifr, cmd);
}
static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	ret = mvpp2_prs_vid_entry_add(port, vid);
	if (ret)
		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
			   MVPP2_PRS_VLAN_FILT_MAX - 1);
	return ret;
}

static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_prs_vid_entry_remove(port, vid);
	return 0;
}
static int mvpp2_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct mvpp2_port *port = netdev_priv(dev);

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			mvpp2_prs_vid_enable_filtering(port);
		} else {
			/* Invalidate all registered VID filters for this
			 * port
			 */
			mvpp2_prs_vid_remove_all(port);

			mvpp2_prs_vid_disable_filtering(port);
		}
	}

	if (changed & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			mvpp22_rss_enable(port);
		else
			mvpp22_rss_disable(port);
	}

	return 0;
}
/* Ethtool methods */

static int mvpp2_ethtool_nway_reset(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(port->phylink);
}
/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}
/* get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	c->tx_coalesce_usecs       = port->tx_time_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return;

	phylink_ethtool_get_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_set_pauseparam(port->phylink, pause);
}
static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_get(port->phylink, cmd);
}

static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
					    const struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info, u32 *rules)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXFH:
		ret = mvpp2_ethtool_rxfh_get(port, info);
		break;
	case ETHTOOL_GRXRINGS:
		info->data = port->nrxqs;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		ret = mvpp2_ethtool_rxfh_set(port, info);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
}

static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				  u8 *hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (indir)
		memcpy(indir, port->indir,
		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_CRC32;

	return 0;
}

static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				  const u8 *key, const u8 hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
		return -EOPNOTSUPP;

	if (indir) {
		memcpy(port->indir, indir,
		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
		mvpp22_rss_fill_table(port, port->id);
	}

	return 0;
}
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
	.ndo_set_features	= mvpp2_set_features,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= mvpp2_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_strings		= mvpp2_ethtool_get_strings,
	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
	.get_sset_count		= mvpp2_ethtool_get_sset_count,
	.get_pauseparam		= mvpp2_ethtool_get_pause_param,
	.set_pauseparam		= mvpp2_ethtool_set_pause_param,
	.get_link_ksettings	= mvpp2_ethtool_get_link_ksettings,
	.set_link_ksettings	= mvpp2_ethtool_set_link_ksettings,
	.get_rxnfc		= mvpp2_ethtool_get_rxnfc,
	.set_rxnfc		= mvpp2_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
	.get_rxfh		= mvpp2_ethtool_get_rxfh,
	.set_rxfh		= mvpp2_ethtool_set_rxfh,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2_queue_vector *v;
	int i, ret;

	port->nqvecs = num_possible_cpus();
	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
		port->nqvecs += 1;

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;
			strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		if (port_node)
			v->irq = of_irq_get_byname(port_node, irqname);
		else
			v->irq = fwnode_irq_get(port->fwnode, i);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}
static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}
/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
	    port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	if (mvpp22_rss_is_supported())
		mvpp22_rss_port_init(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
/* Checks if the port DT description has the TX interrupts
 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
 * they are available, but we need to keep support for old DTs.
 */
static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
				   struct device_node *port_node)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
			  "tx-cpu2", "tx-cpu3" };
	int ret, i;

	if (priv->hw_version == MVPP21)
		return false;

	for (i = 0; i < 5; i++) {
		ret = of_property_match_string(port_node, "interrupt-names",
					       irqs[i]);
		if (ret < 0)
			return false;
	}

	return true;
}
static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				     struct fwnode_handle *fwnode,
				     char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	char fw_mac_addr[ETH_ALEN];

	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
		*mac_from = "firmware node";
		ether_addr_copy(dev->dev_addr, fw_mac_addr);
		return;
	}

	if (priv->hw_version == MVPP21) {
		mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			*mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
			return;
		}
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);
}
static void mvpp2_phylink_validate(struct net_device *dev,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 10000baseKR_Full);
		/* Fall-through */
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 10000baseT_Full);
		/* Fall-through */
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
	}

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void mvpp22_xlg_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = readl(port->base + MVPP22_XLG_STATUS);
	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);

	state->pause = 0;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_TX;
	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_RX;
}
static void mvpp2_gmac_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_STATUS0);

	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_1000BASEX:
		state->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		state->speed = SPEED_2500;
		break;
	default:
		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
			state->speed = SPEED_1000;
		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
			state->speed = SPEED_100;
		else
			state->speed = SPEED_10;
	}

	state->pause = 0;
	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
		state->pause |= MLO_PAUSE_RX;
	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
		state->pause |= MLO_PAUSE_TX;
}
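/* On PPv2.2, GoP port 0 can be backed either by the XLG MAC (10G) or by
 * the GMAC; the MACMODESELECT field read below tells which MAC is
 * currently active, so the matching link-state helper is used.
 */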
static int mvpp2_phylink_mac_link_state(struct net_device *dev,
					struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
		mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
			mvpp22_xlg_link_state(port, state);
			return 1;
		}
	}

	mvpp2_gmac_link_state(port, state);

	return 1;
}
static void mvpp2_mac_an_restart(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (port->phy_interface != PHY_INTERFACE_MODE_SGMII)
		return;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	/* The RESTART_AN bit is cleared by the h/w after restarting the AN
	 * process.
	 */
	val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
			     const struct phylink_link_state *state)
{
	u32 ctrl0, ctrl4;

	ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
	ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);

	if (state->pause & MLO_PAUSE_TX)
		ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
	if (state->pause & MLO_PAUSE_RX)
		ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;

	ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
		 MVPP22_XLG_CTRL4_EN_IDLE_CHECK;

	writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
	writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
}
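/* Reconfiguring the GMAC follows a fixed sequence: force the link down,
 * hold the MAC in reset while ctrl0/ctrl2/ctrl4 and the AN register are
 * rewritten for the new interface mode, then release the reset by writing
 * ctrl2 back with the reset bit cleared.
 */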
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
			      const struct phylink_link_state *state)
{
	u32 an, ctrl0, ctrl2, ctrl4;

	an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);

	/* Force link down */
	an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	an |= MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	/* Set the GMAC in a reset state */
	ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);

	an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_FORCE_LINK_DOWN);
	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);

	if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
		 * they negotiate duplex: they are always operating with a fixed
		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
		 * speed and full duplex here.
		 */
		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
		an |= MVPP2_GMAC_CONFIG_GMII_SPEED |
		      MVPP2_GMAC_CONFIG_FULL_DUPLEX;
	} else if (!phy_interface_mode_is_rgmii(state->interface)) {
		an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG;
	}

	if (state->duplex)
		an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
	if (phylink_test(state->advertising, Pause))
		an |= MVPP2_GMAC_FC_ADV_EN;
	if (phylink_test(state->advertising, Asym_Pause))
		an |= MVPP2_GMAC_FC_ADV_ASM_EN;

	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    state->interface == PHY_INTERFACE_MODE_1000BASEX ||
	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		an |= MVPP2_GMAC_IN_BAND_AUTONEG;
		ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;

		ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			   MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;

		if (state->pause & MLO_PAUSE_TX)
			ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
		if (state->pause & MLO_PAUSE_RX)
			ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
	} else if (phy_interface_mode_is_rgmii(state->interface)) {
		an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS;

		if (state->speed == SPEED_1000)
			an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
		else if (state->speed == SPEED_100)
			an |= MVPP2_GMAC_CONFIG_MII_SPEED;

		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	}

	writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
	writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
			     const struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Check for invalid configuration */
	if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
		netdev_err(dev, "Invalid mode on %s\n", dev->name);
		return;
	}

	/* Make sure the port is disabled when reconfiguring the mode */
	mvpp2_port_disable(port);

	if (port->priv->hw_version == MVPP22 &&
	    port->phy_interface != state->interface) {
		port->phy_interface = state->interface;

		/* Reconfigure the serdes lanes */
		phy_power_off(port->comphy);
		mvpp22_mode_reconfigure(port);
	}

	/* mac (re)configuration */
	if (state->interface == PHY_INTERFACE_MODE_10GKR)
		mvpp2_xlg_config(port, mode, state);
	else if (phy_interface_mode_is_rgmii(state->interface) ||
		 state->interface == PHY_INTERFACE_MODE_SGMII ||
		 state->interface == PHY_INTERFACE_MODE_1000BASEX ||
		 state->interface == PHY_INTERFACE_MODE_2500BASEX)
		mvpp2_gmac_config(port, mode, state);

	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port, state);

	mvpp2_port_enable(port);
}
static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode) &&
	    interface != PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		if (phy_interface_mode_is_rgmii(interface))
			val |= MVPP2_GMAC_FORCE_LINK_PASS;
		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	}

	mvpp2_port_enable(port);

	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	netif_tx_wake_all_queues(dev);
}
static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
				phy_interface_t interface)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode) &&
	    interface != PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		val |= MVPP2_GMAC_FORCE_LINK_DOWN;
		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	}

	netif_tx_stop_all_queues(dev);
	mvpp2_egress_disable(port);
	mvpp2_ingress_disable(port);

	/* When using link interrupts to notify phylink of a MAC state change,
	 * we do not want the port to be disabled (we want to receive further
	 * interrupts, to be notified when the port will have a link later).
	 */
	if (!port->has_phy)
		return;

	mvpp2_port_disable(port);
}
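/* MAC operations handed to phylink_create() further down in
 * mvpp2_port_probe(); phylink invokes them in response to netdev and PHY
 * events.
 */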
static const struct phylink_mac_ops mvpp2_phylink_ops = {
	.validate = mvpp2_phylink_validate,
	.mac_link_state = mvpp2_phylink_mac_link_state,
	.mac_an_restart = mvpp2_mac_an_restart,
	.mac_config = mvpp2_mac_config,
	.mac_link_up = mvpp2_mac_link_up,
	.mac_link_down = mvpp2_mac_link_down,
};
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct device_node *port_node = to_of_node(port_fwnode);
	struct net_device *dev;
	struct resource *res;
	struct phylink *phylink;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	if (port_node) {
		has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
	} else {
		has_tx_irqs = true;
		queue_mode = MVPP2_QDIST_MULTI_MODE;
	}

	if (!has_tx_irqs)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;
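	/* In multi queue mode, each possible CPU gets its own group of
	 * MVPP2_DEFAULT_RXQ receive queues; otherwise the port uses a single
	 * shared group.
	 */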
	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (port_node) {
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->has_phy = !!of_find_property(port_node, "phy", NULL);
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (port_node)
		port->link_irq = of_irq_get_byname(port_node, "link");
	else
		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;
	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->of_node = port_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
	}
	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   ARRAY_SIZE(mvpp2_ethtool_regs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}
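	/* A note on the block above: without dedicated TX interrupts, TX
	 * completion is polled instead; each CPU's pinned hrtimer schedules
	 * the tasklet, which reclaims the descriptors completed on that CPU.
	 */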
	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mvpp22_rss_is_supported())
		dev->hw_features |= NETIF_F_RXHASH;

	if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	}

	dev->vlan_features |= features;
	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
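	/* Worked out: 9728 - 20 = 9708, and rounding 9708 down to the
	 * nearest multiple of 8 gives 9704.
	 */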
	dev->dev.of_node = port_node;
	/* Phylink isn't used w/ ACPI as of now */
	if (port_node) {
		phylink = phylink_create(dev, port_fwnode, phy_mode,
					 &mvpp2_phylink_ops);
		if (IS_ERR(phylink)) {
			err = PTR_ERR(phylink);
			goto err_free_port_pcpu;
		}
		port->phylink = phylink;
	} else {
		port->phylink = NULL;
	}

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_phylink;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_phylink:
	if (port->phylink)
		phylink_destroy(port->phylink);
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	if (port->phylink)
		phylink_destroy(port->phylink);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
	free_netdev(port->dev);
}
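/* Each DRAM chip select reported by the mbus driver below gets its own
 * address decoding window (base, size and target attributes); unused
 * windows are left cleared and disabled via the win_enable mask.
 */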
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (priv->hw_version == MVPP21)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	/* The FIFO size parameters are set depending on the maximum speed a
	 * given port can handle:
	 * - Port 0: 10Gbps
	 * - Port 1: 2.5Gbps
	 * - Ports 2 and 3: 1Gbps
	 */

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size
 * to 3kB.
 */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int port, size, thrs;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (port == 0) {
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
		} else {
			size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
		mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
	}
}
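/* Budget check for the sizes above: 10kB for port 0 plus 3kB for each of
 * the three remaining ports accounts for the full 19kB of Tx FIFO.
 */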
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Rx/Tx FIFO initialization */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	/* Multi queue mode isn't supported on PPv2.1, fall back to single
	 * mode.
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI tables, it can already appear as 'in-use'
			 * in the OS. Because it is overlapped by the second
			 * region of the network controller, make sure it is
			 * released before requesting it again. Care is taken
			 * by the mvpp2 driver to avoid concurrent access to
			 * this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}
	mvpp2_setup_bm_pool();

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;
	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}
	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}
	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}
	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. A workqueue is therefore used to fill 64-bit counters.
	 */
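	/* Rough arithmetic behind the figure above: 64B frames occupy 84B on
	 * the wire, i.e. ~14.88 Mpps or ~1.25 GB/s at 10Gbps, so a 32-bit
	 * byte counter can wrap in under four seconds.
	 */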
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;
err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	mvpp2_dbgfs_cleanup(priv);

	flush_workqueue(priv->stats_queue);
	destroy_workqueue(priv->stats_queue);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");