/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG	0x60
#define MVPP2_RX_FIFO_INIT_REG	0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)	(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)	(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET	5
#define MVPP2_RXQ_CONFIG_REG(rxq)	(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK	0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK	BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS	20
#define MVPP21_RXQ_POOL_SHORT_MASK	0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK	0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS	24
#define MVPP21_RXQ_POOL_LONG_MASK	0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK	0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS	28
#define MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
#define MVPP2_RXQ_DISABLE_MASK	BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG	0x1000
#define MVPP2_PRS_PORT_LU_MAX	0xf
#define MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)	(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)	(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG	0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)	(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK	BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG	0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)	(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG	0x1230
#define MVPP2_PRS_TCAM_EN_MASK	BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG	0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK	BIT(0)
#define MVPP2_CLS_PORT_WAY_REG	0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG	0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
#define MVPP2_CLS_LKP_TBL_REG	0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK	0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG	0x1820
#define MVPP2_CLS_FLOW_TBL0_REG	0x1824
#define MVPP2_CLS_FLOW_TBL1_REG	0x1828
#define MVPP2_CLS_FLOW_TBL2_REG	0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)	(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG	0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG	0x2040
#define MVPP2_RXQ_DESC_ADDR_REG	0x2044
#define MVPP22_DESC_ADDR_OFFS	8
#define MVPP2_RXQ_DESC_SIZE_REG	0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
#define MVPP2_RXQ_NUM_NEW_OFFSET	16
#define MVPP2_RXQ_STATUS_REG(rxq)	(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK	0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
#define MVPP2_RXQ_NON_OCCUPIED_MASK	0x3fff0000
#define MVPP2_RXQ_THRESH_REG	0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET	0
#define MVPP2_OCCUPIED_THRESH_MASK	0x3fff
#define MVPP2_RXQ_INDEX_REG	0x2050
#define MVPP2_TXQ_NUM_REG	0x2080
#define MVPP2_TXQ_DESC_ADDR_REG	0x2084
#define MVPP2_TXQ_DESC_SIZE_REG	0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_TXQ_THRESH_REG	0x2094
#define MVPP2_TXQ_THRESH_OFFSET	16
#define MVPP2_TXQ_THRESH_MASK	0x3fff
#define MVPP2_AGGR_TXQ_UPDATE_REG	0x2090
#define MVPP2_TXQ_INDEX_REG	0x2098
#define MVPP2_TXQ_PREF_BUF_REG	0x209c
#define MVPP2_PREF_BUF_PTR(desc)	((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4	(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16	(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)	((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK	BIT(31)
#define MVPP2_TXQ_PENDING_REG	0x20a0
#define MVPP2_TXQ_PENDING_MASK	0x3fff
#define MVPP2_TXQ_INT_STATUS_REG	0x20a4
#define MVPP2_TXQ_SENT_REG(txq)	(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET	16
#define MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG	0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET	16
#define MVPP2_TXQ_RSVD_RSLT_REG	0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK	0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG	0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET	16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS	8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)	(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK	0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)	(0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)	(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)	(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)	(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE	0x4060
/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG	0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG	0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG	0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG	0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG	0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG	0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG	0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG	0x4164
/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS	0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS	12

#define MVPP22_AXI_CODE_CACHE_OFFS	0
#define MVPP22_AXI_CODE_DOMAIN_OFFS	4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE	0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE	0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE	0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM	3
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_TX_THRESHOLD_REG(port)	(0x5140 + 4 * (port))
#define MVPP2_MAX_ISR_TX_THRESHOLD	0xfffff0

#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)	(0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD	0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(port)	(0x5400 + 4 * (port))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG	0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK	0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)	(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)	(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET	16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK	BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK	BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)	(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG	0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK	BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG	0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)	(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)	(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK	0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK	0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK	0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)	(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK	BIT(0)
#define MVPP2_BM_STOP_MASK	BIT(1)
#define MVPP2_BM_STATE_MASK	BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS	8
#define MVPP2_BM_LOW_THRESH_MASK	0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS	16
#define MVPP2_BM_HIGH_THRESH_MASK	0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)	(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK	BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK	BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK	BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)	(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)	(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG	0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC	0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT	8
#define MVPP2_BM_PHY_RLS_REG(pool)	(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK	BIT(2)
#define MVPP2_BM_VIRT_RLS_REG	0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG	0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG	0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG	0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK	0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET	8
#define MVPP2_TXP_SCHED_CMD_1_REG	0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG	0x8018
#define MVPP2_TXP_SCHED_MTU_REG	0x801c
#define MVPP2_TXP_MTU_MAX	0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG	0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG	0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX	0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)	(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX	0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX	0xffffffff
/* TX general registers */
#define MVPP2_TX_SNOOP_REG	0x8800
#define MVPP2_TX_PORT_FLUSH_REG	0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE	0x24
#define MVPP2_SRC_ADDR_HIGH	0x28
#define MVPP2_PHY_AN_CFG0_REG	0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK	BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT	0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG	0x0
#define MVPP2_GMAC_PORT_EN_MASK	BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK	BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS	2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK	BIT(15)
#define MVPP2_GMAC_CTRL_1_REG	0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK	BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT	6
#define MVPP2_GMAC_PCS_LB_EN_MASK	BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS	7
#define MVPP2_GMAC_CTRL_2_REG	0x8
#define MVPP2_GMAC_INBAND_AN_MASK	BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK	GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK	BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK	BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING	BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK	BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG	0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVPP2_GMAC_IN_BAND_AUTONEG	BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS	BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN	BIT(7)
#define MVPP2_GMAC_FC_ADV_EN	BIT(9)
#define MVPP2_GMAC_FLOW_CTRL_AUTONEG	BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG	0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_CTRL_4_REG	0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL	BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL	BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS	BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG	0x100
#define MVPP22_XLG_CTRL0_PORT_EN	BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS	BIT(1)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN	BIT(7)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS	BIT(14)
#define MVPP22_XLG_CTRL1_REG	0x104
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS	0
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK	0x1fff
#define MVPP22_XLG_CTRL3_REG	0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G	(1 << 13)

#define MVPP22_XLG_CTRL4_REG	0x184
#define MVPP22_XLG_CTRL4_FWD_FC	BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC	BIT(6)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC	BIT(12)
/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG	0x1204
#define MVPP22_SMI_POLLING_EN	BIT(10)

#define MVPP22_GMAC_BASE(port)	(0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
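/* For example, in a ring with last_desc = 3, the index sequence
 * produced by MVPP2_QUEUE_NEXT_DESC() is 0 -> 1 -> 2 -> 3 -> 0.
 */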
/* MPCS registers. PPv2.2 only */
#define MVPP22_MPCS_BASE(port)	(0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL	0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN	BIT(10)
#define MVPP22_MPCS_CLK_RESET	0x14c
#define MAC_CLK_RESET_SD_TX	BIT(0)
#define MAC_CLK_RESET_SD_RX	BIT(1)
#define MAC_CLK_RESET_MAC	BIT(2)
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n)	((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET	BIT(11)
/* XPCS registers. PPv2.2 only */
#define MVPP22_XPCS_BASE(port)	(0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0	0x0
#define MVPP22_XPCS_CFG0_PCS_MODE(n)	((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n)	((n) << 5)
/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1	0x1108
#define GENCONF_SOFT_RESET1_GOP	BIT(6)
#define GENCONF_PORT_CTRL0	0x1110
#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT	BIT(1)
#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE	BIT(29)
#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR	BIT(31)
#define GENCONF_PORT_CTRL1	0x1114
#define GENCONF_PORT_CTRL1_EN(p)	BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p)	(BIT(p) << 28)
#define GENCONF_CTRL0	0x1120
#define GENCONF_CTRL0_PORT0_RGMII	BIT(0)
#define GENCONF_CTRL0_PORT1_RGMII_MII	BIT(1)
#define GENCONF_CTRL0_PORT1_RGMII	BIT(2)
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_TXDONE_COAL_USEC	1000
#define MVPP2_RX_COAL_PKTS	32
#define MVPP2_RX_COAL_USEC	100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE	2
#define MVPP2_ETH_TYPE_LEN	2
#define MVPP2_PPPOE_HDR_SIZE	8
#define MVPP2_VLAN_TAG_LEN	4
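/* With the Marvell header in place, the L2 header seen by software is
 * MVPP2_MH_SIZE + ETH_HLEN = 2 + 14 = 16 bytes, so the IP header that
 * follows starts on a 4-byte boundary without any extra padding.
 */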
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE	0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE	9800
/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX	0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT	16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS	4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ	8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ	4
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD	128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD	1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK	64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE	256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE	32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN	(MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
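/* Worked example, assuming a 1500-byte MTU and a 64-byte cache line:
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536.
 * MVPP2_RX_BUF_SIZE() then adds NET_SKB_PAD of headroom, and
 * MVPP2_RX_TOTAL_SIZE() adds room for the skb_shared_info;
 * MVPP2_RX_MAX_PKT_SIZE() is the inverse of those two steps.
 */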
#define MVPP2_BIT_TO_BYTE(bit)	((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE	16

/* Port flags */
#define MVPP2_F_LOOPBACK	BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS	6
#define MVPP2_PRS_SRAM_WORDS	4
#define MVPP2_PRS_FLOW_ID_SIZE	64
#define MVPP2_PRS_FLOW_ID_MASK	0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD	0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC	0xe0
#define MVPP2_PRS_IPV4_MC_MASK	0xf0
#define MVPP2_PRS_IPV4_BC_MASK	0xff
#define MVPP2_PRS_IPV4_IHL	0x5
#define MVPP2_PRS_IPV4_IHL_MASK	0xf
#define MVPP2_PRS_IPV6_MC	0xff
#define MVPP2_PRS_IPV6_MC_MASK	0xff
#define MVPP2_PRS_IPV6_HOP_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX	100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS	8
#define MVPP2_PRS_PORT_MASK	0xff
#define MVPP2_PRS_LU_MASK	0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)	\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
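/* Each 32-bit TCAM word holds two data bytes followed by their two
 * enable bytes, so logical data offsets map as
 * MVPP2_PRS_TCAM_DATA_BYTE(0..3) = 0, 1, 4, 5 and
 * MVPP2_PRS_TCAM_DATA_BYTE_EN(0..3) = 2, 3, 6, 7, and so on.
 */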
#define MVPP2_PRS_TCAM_AI_BYTE	16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE	20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD	5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL	0
#define MVPP2_PE_FIRST_FREE_TID	1
#define MVPP2_PE_LAST_FREE_TID	(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6	(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL	(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE	(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL	(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS	0
#define MVPP2_PRS_SRAM_RI_WORD	0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS	32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD	1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS	32
#define MVPP2_PRS_SRAM_SHIFT_OFFS	64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT	72
#define MVPP2_PRS_SRAM_UDF_OFFS	73
#define MVPP2_PRS_SRAM_UDF_BITS	8
#define MVPP2_PRS_SRAM_UDF_MASK	0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT	81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS	82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK	0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3	1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4	4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD	1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS	87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD	0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS	89
#define MVPP2_PRS_SRAM_AI_OFFS	90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS	98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS	8
#define MVPP2_PRS_SRAM_AI_MASK	0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS	106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK	0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT	110
#define MVPP2_PRS_SRAM_LU_GEN_BIT	111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK	0x1
#define MVPP2_PRS_RI_DSA_MASK	0x2
#define MVPP2_PRS_RI_VLAN_MASK	(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE	0x0
#define MVPP2_PRS_RI_VLAN_SINGLE	BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE	BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE	(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK	0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC	BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK	(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST	0x0
#define MVPP2_PRS_RI_L2_MCAST	BIT(9)
#define MVPP2_PRS_RI_L2_BCAST	BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK	0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK	(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN	0x0
#define MVPP2_PRS_RI_L3_IP4	BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT	BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER	(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6	BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT	(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP	(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK	(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST	0x0
#define MVPP2_PRS_RI_L3_MCAST	BIT(15)
#define MVPP2_PRS_RI_L3_BCAST	(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK	0x20000
#define MVPP2_PRS_RI_UDF3_MASK	0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL	BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK	0x1c00000
#define MVPP2_PRS_RI_L4_TCP	BIT(22)
#define MVPP2_PRS_RI_L4_UDP	BIT(23)
#define MVPP2_PRS_RI_L4_OTHER	(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK	0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE	BIT(29)
#define MVPP2_PRS_RI_DROP_MASK	0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT	BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT	BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT	BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT	BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT	BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI	0
#define MVPP2_PRS_DBL_VLAN_AI_BIT	BIT(7)
/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED	true
#define MVPP2_PRS_UNTAGGED	false
#define MVPP2_PRS_EDSA	true
#define MVPP2_PRS_DSA	false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE	64
/* BM constants */
#define MVPP2_BM_POOLS_NUM	8
#define MVPP2_BM_LONG_BUF_NUM	1024
#define MVPP2_BM_SHORT_BUF_NUM	2048
#define MVPP2_BM_POOL_SIZE_MAX	(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN	128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL	3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(512)
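/* Sanity check of the 512-byte budget, assuming the common 64-bit
 * values NET_SKB_PAD = 64 and MVPP2_SKB_SHINFO_SIZE = 320 (both are
 * configuration dependent): MVPP2_RX_MAX_PKT_SIZE(512) then leaves
 * 512 - 64 - 320 = 128 bytes of packet data per short buffer.
 */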
#define MVPP21_ADDR_SPACE_SZ	0
#define MVPP22_ADDR_SPACE_SZ	SZ_64K

#define MVPP2_MAX_THREADS	8
#define MVPP2_MAX_QVECS	MVPP2_MAX_THREADS
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each "software thread" can access the base
	 * register through a separate address space, each 64 KB apart
	 * from each other. Typically, such address spaces will be
	 * used per CPU.
	 */
	void __iomem *swth_base[MVPP2_MAX_THREADS];

	/* On PPv2.2, some port control registers are located into the system
	 * controller space. These registers are accessible through a regmap.
	 */
	struct regmap *sysctrl_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};
/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_queue_vector {
	struct napi_struct napi;
	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
	u32 pending_cause_rx;
	struct mvpp2_port *port;
};
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */
#define MVPP2_TXD_L3_OFF_SHIFT	0
#define MVPP2_TXD_IP_HLEN_SHIFT	8
#define MVPP2_TXD_L4_CSUM_FRAG	BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT	BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP	BIT(24)
#define MVPP2_TXD_L3_IP6	BIT(26)
#define MVPP2_TXD_L_DESC	BIT(28)
#define MVPP2_TXD_F_DESC	BIT(29)

#define MVPP2_RXD_ERR_SUMMARY	BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK	(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC	0x0
#define MVPP2_RXD_ERR_OVERRUN	BIT(13)
#define MVPP2_RXD_ERR_RESOURCE	(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC	BIT(21)
#define MVPP2_RXD_L4_CSUM_OK	BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP	BIT(25)
#define MVPP2_RXD_L4_UDP	BIT(26)
#define MVPP2_RXD_L3_IP4	BIT(28)
#define MVPP2_RXD_L3_IP6	BIT(30)
#define MVPP2_RXD_BUF_HDR	BIT(31)
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};
struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};
struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};
struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_SINGLE_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}
static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}
/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}
static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}
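/* Illustrative sketch of the indirect access pattern described above;
 * mvpp2_example_txq_pending() is not part of the driver, it only shows
 * that the queue-select write and the dependent read must go through
 * the same per-CPU window.
 */
static inline u32 mvpp2_example_txq_pending(struct mvpp2 *priv, int cpu,
					    int txq_id)
{
	/* Select the TXQ, then read a register tied to that selection */
	mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_NUM_REG, txq_id);
	return mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_PENDING_REG) &
	       MVPP2_TXQ_PENDING_MASK;
}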
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}
static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}
static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}
/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
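/* For example, with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8,
 * logical TXQ 2 of port 1 maps to physical TXQ (16 + 1) * 8 + 2 = 138,
 * and port 1's physical egress port number is 16 + 1 = 17.
 */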
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}
/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}
/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
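/* The enable byte stores the complement of the port map: after
 * mvpp2_prs_tcam_port_map_set(pe, BIT(0) | BIT(2)) it holds 0xfa, and
 * mvpp2_prs_tcam_port_map_get() recovers 0x05.
 */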
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}
/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;

	return true;
}
1524 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry
*pe
,
1525 unsigned int bits
, unsigned int enable
)
1527 int i
, ai_idx
= MVPP2_PRS_TCAM_AI_BYTE
;
1529 for (i
= 0; i
< MVPP2_PRS_AI_BITS
; i
++) {
1531 if (!(enable
& BIT(i
)))
1535 pe
->tcam
.byte
[ai_idx
] |= 1 << i
;
1537 pe
->tcam
.byte
[ai_idx
] &= ~(1 << i
);
1540 pe
->tcam
.byte
[MVPP2_PRS_TCAM_EN_OFFS(ai_idx
)] |= enable
;
/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}
/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int index, int port,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag*/
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;

		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto free_pe;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift 4 bytes - skip 1 vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);

free_pe:
	kfree(pe);

	return ret;
}
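/* A new single/triple VLAN entry is only accepted if its index lands
 * after the last double VLAN entry (the "tid <= tid_aux" check
 * above), keeping the two kinds of rules grouped in a fixed relative
 * order inside the free TCAM range.
 */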
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}
/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto free_pe;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto free_pe;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

free_pe:
	kfree(pe);
	return ret;
}
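/* The prs_double_vlans[] array tracks which AI values are already in
 * use by a double VLAN entry; the chosen AI is stored in the entry's
 * SRAM word together with MVPP2_PRS_DBL_VLAN_AI_BIT so that the
 * second VLAN lookup pass can tell the tag combinations apart.
 */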
/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
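/* Two TCAM entries are installed above per L4 protocol: one that
 * only matches the protocol field and flags the packet as fragmented
 * in the result info, and a second one that additionally requires a
 * zero fragment-offset field (TCAM data bytes 2 and 3) and reports
 * the packet as not fragmented.
 */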
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
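/* For flow entries the SRAM AI field carries the classification
 * lookup (flow) ID, masked by MVPP2_PRS_FLOW_ID_MASK; this is the
 * value mvpp2_prs_flow_find() reads back when looking up the flow
 * entry that belongs to a port.
 */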
/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (placeholder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Placeholders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}
/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
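/* The specific ethertype entries above are allocated from the free
 * TCAM range, while the catch-all "unknown ethertype" rule lives at
 * the fixed MVPP2_PE_ETH_TYPE_UN index; the per-entry shadow udf and
 * finish flags record how the L2 stage was resolved for later reuse.
 */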
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
					      MVPP2_PRS_DBL_VLANS_MAX,
					      GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
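/* MVPP2_PE_VLAN_DBL and MVPP2_PE_VLAN_NONE are fixed fallback
 * entries: the former catches the second lookup pass of a
 * double-tagged frame (double-vlan AI bit set), the latter frames
 * carrying no VLAN tag at all.
 */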
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all the entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry*/
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
						       MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all the entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
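/* Per-address entries are created and destroyed dynamically: an
 * entry is invalidated and marked free in the shadow table as soon
 * as its port map becomes empty, so the TCAM only holds addresses
 * that at least one port still cares about.
 */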
static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}
/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}
static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such entry doesn't exist yet */
	if (!pe) {
		/* Go through all the entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}
/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
}
/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}
/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
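/* Each buffer pointer is kept as a (dma, virt) pair, hence the
 * factor of two in size_bytes above: 2 * 4 bytes per buffer on
 * PPv2.1 and 2 * 8 bytes on PPv2.2, which supports addresses wider
 * than 32 bit.
 */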
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
3810 static void mvpp2_bm_bufs_get_addrs(struct device
*dev
, struct mvpp2
*priv
,
3811 struct mvpp2_bm_pool
*bm_pool
,
3812 dma_addr_t
*dma_addr
,
3813 phys_addr_t
*phys_addr
)
3815 int cpu
= get_cpu();
3817 *dma_addr
= mvpp2_percpu_read(priv
, cpu
,
3818 MVPP2_BM_PHY_ALLOC_REG(bm_pool
->id
));
3819 *phys_addr
= mvpp2_percpu_read(priv
, cpu
, MVPP2_BM_VIRT_ALLOC_REG
);
3821 if (priv
->hw_version
== MVPP22
) {
3823 u32 dma_addr_highbits
, phys_addr_highbits
;
3825 val
= mvpp2_percpu_read(priv
, cpu
, MVPP22_BM_ADDR_HIGH_ALLOC
);
3826 dma_addr_highbits
= (val
& MVPP22_BM_ADDR_HIGH_PHYS_MASK
);
3827 phys_addr_highbits
= (val
& MVPP22_BM_ADDR_HIGH_VIRT_MASK
) >>
3828 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT
;
3830 if (sizeof(dma_addr_t
) == 8)
3831 *dma_addr
|= (u64
)dma_addr_highbits
<< 32;
3833 if (sizeof(phys_addr_t
) == 8)
3834 *phys_addr
|= (u64
)phys_addr_highbits
<< 32;
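/* Address reassembly example (illustrative values): with a 40-bit DMA
 * address such as 0x12_3456_7890, the PHY_ALLOC register returns the low
 * 32 bits (0x34567890) while MVPP22_BM_ADDR_HIGH_ALLOC carries the high
 * bits (0x12), which are shifted back into place only when dma_addr_t is
 * 64 bits wide.
 */
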
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	int cpu = get_cpu();

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_percpu_write(port->priv, cpu,
				   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_percpu_write(port->priv, cpu,
			   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_percpu_write(port->priv, cpu,
			   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	put_cpu();
}

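/* Release-path example (illustrative values): for buf_dma_addr
 * 0x12_3456_7890 on PPv2.2, upper_32_bits() yields 0x12, which is masked
 * into MVPP22_BM_ADDR_HIGH_RLS_REG together with the high bits of the
 * cookie (physical) address before the low 32 bits go to
 * MVPP2_BM_PHY_RLS_REG, which is why the high-bits write is done first.
 */
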
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);

	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

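/* frag_size example (illustrative, assuming the usual MVPP2_RX_PKT_SIZE /
 * MVPP2_RX_BUF_SIZE macros): for a 1500-byte MTU the buffer must cover the
 * frame plus the Marvell header and Ethernet overhead, and frag_size then
 * adds SKB_DATA_ALIGN() padding plus MVPP2_SKB_SHINFO_SIZE so that
 * build_skb() can place struct skb_shared_info after the packet data.
 */
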
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= (1 << port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
		MVPP2_SKB_SHINFO_SIZE;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current CPU's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_percpu_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

/* Port configuration routines */

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
		 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
	val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GKR:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	enum phy_mode mode;
	int ret;

	if (!port->comphy)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		mode = PHY_MODE_SGMII;
		break;
	case PHY_INTERFACE_MODE_10GKR:
		mode = PHY_MODE_10GKR;
		break;
	default:
		return -EINVAL;
	}

	ret = phy_set_mode(port->comphy, mode);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
{
	u32 val;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
		val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
		val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);

		val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
		val |= MVPP2_GMAC_DISABLE_PADDING;
		val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
		val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
		       MVPP22_CTRL4_SYNC_BYPASS_DIS |
		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
		val &= ~MVPP22_CTRL4_DP_CLK_SEL;
		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);

		val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
		val &= ~MVPP2_GMAC_DISABLE_PADDING;
		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
	}

	/* The port is connected to a copper PHY */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	       MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	       MVPP2_GMAC_AN_DUPLEX_EN;
	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_IN_BAND_AUTONEG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
{
	u32 val;

	/* Force link down */
	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	val |= MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	/* Set the GMAC in a reset state */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	val |= MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* Configure the PCS and in-band AN */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
	}
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	mvpp2_port_mii_gmac_configure_mode(port);

	/* Unset the GMAC reset state */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* Stop forcing link down */
	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id != 0)
		return;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
	val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);
}

static void mvpp22_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL3_REG);
		val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
		    port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(val, port->base + MVPP22_XLG_CTRL3_REG);
	}
}

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	if (port->priv->hw_version == MVPP22)
		mvpp22_port_mii_set(port);

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		mvpp2_port_mii_gmac_configure(port);
	else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
		mvpp2_port_mii_xlg_configure(port);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN |
		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
			 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val &= ~(MVPP2_GMAC_PORT_EN_MASK);
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	      ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

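/* Encoding note: the max RX size fields in the GMAC register above and the
 * XLG register below count 2-byte units, hence the division by 2. As an
 * illustrative example, a 1518-byte limit minus the 2-byte Marvell header
 * is programmed as (1518 - 2) / 2 = 758.
 */
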
/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		   MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

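/* Example (illustrative counts): after processing 32 packets and refilling
 * 32 buffers, val = 32 | (32 << MVPP2_RXQ_NUM_NEW_OFFSET); a single write
 * thus returns the processed descriptors and credits the freshly refilled
 * ones in one register access.
 */
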
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

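/* Ring-wrap example (assuming MVPP2_QUEUE_NEXT_DESC wraps at last_desc):
 * with a 128-entry ring, index 126 advances to 127 and index 127 wraps
 * back to 0. The prefetch() of the *next* descriptor hides part of the
 * memory latency of the descriptor read on the following call.
 */
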
/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}

/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;
	int cpu = smp_processor_id();

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}

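/* Reservation example (assuming MVPP2_CPU_DESC_CHUNK is the per-CPU chunk
 * size, e.g. 64): a CPU that needs 3 descriptors but has only 1 reserved
 * requests max(64, 2) = 64 at once, amortizing the register round-trip
 * over many future transmissions instead of reserving per packet.
 */
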
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}

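/* Worked example (illustrative offsets): for an IPv4/TCP frame with the IP
 * header at byte 14 and a 5-word (20-byte) IP header, the caller passes
 * l3_offs = 14 and ip_hdr_len = 5; the IPv4-csum and TCP-csum disable bits
 * end up cleared, so the hardware generates both checksums.
 */
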
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_percpu_read(port->priv, smp_processor_id(),
				MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_percpu_read(port->priv, smp_processor_id(),
				  MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	int cpu = get_cpu();

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}

/* For some reason in the LSP this is done on each CPU. Why? */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	int cpu = get_cpu();
	u32 val;

	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);

	put_cpu();
}

static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}

static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
				 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  int cpu)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}

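/* Design note: there is one aggregated TXQ per CPU, indexed here by "cpu",
 * so each CPU queues descriptors lock-free into its own ring and the
 * hardware moves them to the per-port physical queues. The read of
 * MVPP2_AGGR_TXQ_INDEX_REG works around the fact that these rings are not
 * reset across driver reloads.
 */
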
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;
	int cpu;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	int cpu;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be contiguous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			goto cleanup;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			goto cleanup;
	}

	return 0;
cleanup:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);

		dma_free_coherent(port->dev->dev.parent,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  txq_pcpu->tso_headers,
				  txq_pcpu->tso_headers_dma);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_dma);

	return -ENOMEM;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);

		dma_free_coherent(port->dev->dev.parent,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  txq_pcpu->tso_headers,
				  txq_pcpu->tso_headers_dma);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}

static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
				   struct phy_device *phydev)
{
	u32 val;

	if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_SGMII)
		return;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
		 MVPP2_GMAC_CONFIG_GMII_SPEED |
		 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		 MVPP2_GMAC_AN_SPEED_EN |
		 MVPP2_GMAC_AN_DUPLEX_EN);

	if (phydev->duplex)
		val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

	if (phydev->speed == SPEED_1000)
		val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
	else if (phydev->speed == SPEED_100)
		val |= MVPP2_GMAC_CONFIG_MII_SPEED;

	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool link_reconfigured = false;
	u32 val;

	if (phydev->link) {
		if (port->phy_interface != phydev->interface && port->comphy) {
			/* disable current port for reconfiguration */
			mvpp2_interrupts_disable(port);
			netif_carrier_off(port->dev);
			mvpp2_port_disable(port);
			phy_power_off(port->comphy);

			/* comphy reconfiguration */
			port->phy_interface = phydev->interface;
			mvpp22_comphy_init(port);

			/* gop/mac reconfiguration */
			mvpp22_gop_init(port);
			mvpp2_port_mii_set(port);

			link_reconfigured = true;
		}

		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			mvpp2_gmac_set_autoneg(port, phydev);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link || link_reconfigured) {
		port->link = phydev->link;

		if (phydev->link) {
			if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
				val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
				val |= (MVPP2_GMAC_FORCE_LINK_PASS |
					MVPP2_GMAC_FORCE_LINK_DOWN);
				writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			}

			mvpp2_interrupts_enable(port);
			mvpp2_port_enable(port);

			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
			netif_carrier_on(dev);
			netif_tx_wake_all_queues(dev);
		} else {
			port->duplex = -1;
			port->speed = 0;

			netif_tx_stop_all_queues(dev);
			netif_carrier_off(dev);
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);

			mvpp2_port_disable(port);
			mvpp2_interrupts_disable(port);
		}

		phy_print_status(phydev);
	}
}

static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}

5878 /* Main RX/TX processing routines */
5880 /* Display more error info */
5881 static void mvpp2_rx_error(struct mvpp2_port
*port
,
5882 struct mvpp2_rx_desc
*rx_desc
)
5884 u32 status
= mvpp2_rxdesc_status_get(port
, rx_desc
);
5885 size_t sz
= mvpp2_rxdesc_size_get(port
, rx_desc
);
5887 switch (status
& MVPP2_RXD_ERR_CODE_MASK
) {
5888 case MVPP2_RXD_ERR_CRC
:
5889 netdev_err(port
->dev
, "bad rx status %08x (crc error), size=%zu\n",
5892 case MVPP2_RXD_ERR_OVERRUN
:
5893 netdev_err(port
->dev
, "bad rx status %08x (overrun error), size=%zu\n",
5896 case MVPP2_RXD_ERR_RESOURCE
:
5897 netdev_err(port
->dev
, "bad rx status %08x (resource error), size=%zu\n",
/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		    (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
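/* Note: CHECKSUM_UNNECESSARY is only reported when the descriptor flags
 * say the L3 header is IPv4 without header errors (or IPv6) and the L4
 * protocol is TCP or UDP with a checksum the hardware validated. Any
 * other combination falls back to CHECKSUM_NONE and lets the stack
 * verify the checksum in software.
 */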
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}
/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
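/* The TX descriptor checksum command returned above is built from the
 * skb's network offset, L3 protocol and L4 protocol; for anything the
 * hardware cannot offload (non-IP, unknown L4), both L4 checksumming and
 * IP header checksumming are explicitly disabled in the descriptor.
 */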
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * described by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_done;
}
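/* Release a TX descriptor taken from the aggregated queue: fetch the DMA
 * address and size back from the descriptor, undo the streaming mapping
 * and return the descriptor to the per-port queue accounting.
 */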
static void tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			      struct mvpp2_tx_desc *desc)
{
	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);

	dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
			 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_offset_set(port, tx_desc,
					buf_dma_addr & MVPP2_TX_DESC_ALIGN);
		mvpp2_txdesc_dma_addr_set(port, tx_desc,
					  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
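/* The two helpers below build the descriptor chain for software TSO.
 * Segment headers are not part of the skb data: they are written by
 * tso_build_hdr() into a per-CPU DMA-coherent scratch area
 * (txq_pcpu->tso_headers), so the header descriptor points into that
 * buffer rather than into the skb.
 */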
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr & ~MVPP2_TX_DESC_ALIGN);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
			     MVPP2_TXD_F_DESC |
			     MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_offset_set(port, tx_desc,
				buf_dma_addr & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			/* last descriptor in the TCP packet */
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}
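/* Software TSO: cut the skb into MSS-sized segments. Each segment gets
 * one header descriptor (mvpp2_tso_put_hdr) followed by one or more data
 * descriptors (mvpp2_tso_put_data); descriptor availability is checked
 * up-front with tso_count_descs() so the loop below cannot run out of
 * descriptors halfway through a packet.
 */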
static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
				      tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);

			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;

		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
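/* ndo_start_xmit() callback. A packet first gets descriptors from the
 * per-CPU aggregated TX queue, which the hardware then moves to the
 * per-port TX queue selected by skb_get_queue_mapping(). On any mapping
 * or descriptor shortage the packet is dropped (NETDEV_TX_OK is still
 * returned, with dev->stats.tx_dropped incremented), never requeued.
 */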
/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_offset_set(port, tx_desc,
				buf_dma_addr & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}
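/* NAPI poll callback. One queue vector (and thus one NAPI context) may
 * cover several Rx queues; the loop below round-robins over the queues
 * whose cause bit is set until the budget is exhausted, and any leftover
 * cause bits are parked in qv->pending_cause_rx for the next poll.
 */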
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	int cpu = smp_processor_id();

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
					MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_percpu_write(port->priv, cpu,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	if (cause_tx) {
		cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
		mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;

		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22) {
		mvpp22_comphy_init(port);
		mvpp22_gop_init(port);
	}

	mvpp2_port_mii_set(port);
	mvpp2_port_enable(port);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(ndev->phydev);
	phy_power_off(port->comphy);
}
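/* Sanity-check ethtool ring parameters: zero sizes are rejected, sizes
 * are clamped to MVPP2_MAX_RXD/MVPP2_MAX_TXD, and Rx rings are kept
 * aligned to 16 descriptors and Tx rings to 32, so odd values are
 * rounded up and the adjusted values reported back to the caller.
 */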
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
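/* On PPv2.1 the MAC address set by the bootloader can be read back from
 * hardware; it is spread over three registers (GMAC low part, LMS middle
 * and high parts), which are reassembled byte by byte below.
 */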
static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	/* No PHY is attached */
	if (!port->phy_node)
		return 0;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->link = 0;
	port->duplex = 0;
	port->speed = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	if (!ndev->phydev)
		return;

	phy_disconnect(ndev->phydev);
}
static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_affinity_hint(qv->irq,
					      cpumask_of(qv->sw_thread_id));
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		free_irq(qv->irq, qv);
	}

	return err;
}

static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		free_irq(qv->irq, qv);
	}
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* By default, the link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
			tasklet_kill(&port_pcpu->tx_done_tasklet);
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (allmulti && !netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev)
			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
	}
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto log_error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto log_error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto log_error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

log_error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto log_error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto log_error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

log_error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}
/* Ethtool methods */

/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}
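/* Only queue 0 is sampled when reporting coalescing settings back to
 * ethtool, since mvpp2_ethtool_set_coalesce() above applies the same
 * values to every Rx and Tx queue anyway.
 */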
/* get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2_queue_vector *v;
	int i, ret;

	port->nqvecs = num_possible_cpus();
	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
		port->nqvecs += 1;

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;
			strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		v->irq = of_irq_get_byname(port_node, irqname);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}
static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}
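/* On PPv2.1 every Rx queue of the port is steered to a single interrupt
 * group. On PPv2.2 each queue vector maps a sub-range of the port's Rx
 * queues (first_rxq/nrxqs) to the software thread that owns the vector,
 * which is what makes per-CPU Rx interrupts possible.
 */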
/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
	    (port->ntxqs > MVPP2_MAX_TXQ))
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
/* Checks if the port DT description has the TX interrupts
 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
 * they are available, but we need to keep support for old DTs.
 */
static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
				   struct device_node *port_node)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
			  "tx-cpu2", "tx-cpu3" };
	int ret, i;

	if (priv->hw_version == MVPP21)
		return false;

	for (i = 0; i < 5; i++) {
		ret = of_property_match_string(port_node, "interrupt-names",
					       irqs[i]);
		if (ret < 0)
			return false;
	}

	return true;
}
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv)
{
	struct device_node *phy_node;
	struct phy *comphy;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	const char *dt_mac_addr;
	const char *mac_from;
	char hw_mac_addr[ETH_ALEN];
	unsigned int ntxqs, nrxqs;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);

	if (!has_tx_irqs)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
	if (IS_ERR(comphy)) {
		if (PTR_ERR(comphy) == -EPROBE_DEFER) {
			err = -EPROBE_DEFER;
			goto err_free_netdev;
		}
		comphy = NULL;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->phy_node = phy_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_deinit_qvecs;
		}
	} else {
		if (of_property_read_u32(port_node, "gop-port-id",
					 &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_deinit_qvecs;
	}

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		if (priv->hw_version == MVPP21)
			mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[id] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
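/* The decoding windows above mirror the mbus DRAM layout reported by
 * mv_mbus_dram_info(): one window per chip-select, programmed with the
 * CS base, attributes and target ID, then enabled in one shot through
 * MVPP2_BASE_ADDR_ENABLE.
 */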
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
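/* mvpp2_axi_init() above only runs on PPv2.2: it programs the AXI
 * read/write attributes (cacheability and coherency domain) used by the
 * Buffer Manager, descriptor and packet-data masters.
 */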
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int port_count, i;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	if (priv->hw_version == MVPP22) {
		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
		if (IS_ERR(priv->mg_clk)) {
			err = PTR_ERR(priv->mg_clk);
			goto err_gop_clk;
		}

		err = clk_prepare_enable(priv->mg_clk);
		if (err < 0)
			goto err_gop_clk;
	}

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(*priv->port_list),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_mg_clk;
	}

	/* Initialize ports */
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv);
		if (err < 0)
			goto err_mg_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");