net: hns3: Fix a response data read error of tqp statistics query
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
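
/*
 * Usage sketch (illustrative only): each entry of the string tables below
 * pairs an ethtool counter name with that counter's byte offset inside its
 * stats structure, so any counter can be fetched generically:
 *
 *	val = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			       g_mac_stats_string[i].offset);
 */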
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac Loopback test",
	"Serdes Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};
static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
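
/*
 * The stats query helpers below issue one command spread over a chain of
 * descriptors. The first descriptor also carries the command header, so it
 * returns fewer data words than the follow-up descriptors; each loop
 * special-cases i == 0 before accumulating the little-endian response
 * words into the host-order counter block.
 */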
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
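
/*
 * Per-queue statistics: one HCLGE_OPC_QUERY_RX_STATUS/TX_STATUS command is
 * issued per TQP, with the queue index written to request word data[0] and
 * the ring packet counter accumulated from response word data[1]. This is
 * the query path referred to by the commit subject ("Fix a response data
 * read error of tqp statistics query").
 */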
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
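
/*
 * hclge_tqps_get_stats() below emits all TX ring counters first, then all
 * RX ring counters, and hclge_tqps_get_strings() further down emits names
 * in the same order ("txq#N_pktnum_rcd" then "rxq#N_pktnum_rcd"), so
 * values and names line up in the ethtool -S output.
 */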
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp contributes one TX and one RX counter */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
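
/*
 * A minimal usage sketch (ethtool_buf is a hypothetical destination
 * buffer): flatten one stats block into an ethtool buffer and get back
 * the next write position:
 *
 *	u64 *next = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
 *					 g_mac_stats_string,
 *					 ARRAY_SIZE(g_mac_stats_string),
 *					 ethtool_buf);
 */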
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		/* use "%s" so a stray '%' in a name cannot be parsed */
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
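
/*
 * HCLGE_STATE_STATISTICS_UPDATING acts as a simple non-blocking lock in
 * hclge_update_stats() above: test_and_set_bit() makes a concurrent caller
 * return early instead of issuing overlapping firmware queries, and the
 * bit is cleared once the netstat snapshot has been refreshed.
 */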
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported in all mac modes (GE/XGE/LGE/CGE)
	 * phy: supported only when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		} else {
			count = -EOPNOTSUPP;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
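
/*
 * For ETH_SS_STATS the count above is the three fixed tables plus two
 * counters per queue; e.g. with 16 TQPs:
 *	ARRAY_SIZE(g_mac_stats_string) + ARRAY_SIZE(g_all_32bit_stats_string)
 *	+ ARRAY_SIZE(g_all_64bit_stats_string) + 16 * 2
 */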
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
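
/*
 * The loop above re-sends the query a few times with a 1-2 ms sleep
 * between attempts, giving a PF whose reset has not yet completed a short
 * grace period before the final status is parsed (and, if the DONE bit is
 * still clear, reported as -EINVAL by hclge_parse_func_status()).
 */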
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}
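
/*
 * MAC address reassembly above: the low 32 bits come from param[2] and the
 * high bits from param[3]. Writing the merge as
 * (mac_addr_tmp_high << 31) << 1 rather than << 32 keeps each shift count
 * below 32. For example, high = 0x1122 and low = 0x33445566 combine to
 * 0x112233445566, which the per-byte loop stores as 66:55:44:33:22:11.
 */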
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The read length is sent to hardware in units of 4 bytes */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently, non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
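
/*
 * Note: both MSS limits above are packed with the same
 * HCLGE_TSO_MSS_MIN_M/_S pair before being stored into their own
 * little-endian fields, presumably because the min and max values share
 * the same bit layout within their respective 16-bit words.
 */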
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}
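
/*
 * The check above requires the shared buffer to cover at least
 *
 *	shared_std = max(shared_buf_min,
 *			 pfc_en * mps + (tc_num - pfc_en) * mps / 2 + mps)
 *
 * so rx_all must exceed the private allocation plus shared_std; only then
 * are the shared watermarks and per-TC thresholds filled in.
 */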
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total number of pfc disabled TCs, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}
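
/*
 * Summary of hclge_rx_buffer_calc() above: it tries four successively more
 * aggressive layouts - (1) full private buffers for every enabled TC,
 * (2) shrunken private buffers, (3) dropping the private buffers of
 * PFC-disabled TCs from the last TC backwards, (4) dropping those of
 * PFC-enabled TCs - re-checking hclge_is_rx_buf_ok() after each step and
 * returning -ENOMEM only if even step 4 cannot make the shared buffer fit.
 */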
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}
	return 0;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}
	return 0;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
1977 static int hclge_init_msi(struct hclge_dev *hdev)
1979 struct pci_dev *pdev = hdev->pdev;
1980 int vectors;
1981 int i;
1983 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1984 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1985 if (vectors < 0) {
1986 dev_err(&pdev->dev,
1987 "failed(%d) to allocate MSI/MSI-X vectors\n",
1988 vectors);
1989 return vectors;
1991 if (vectors < hdev->num_msi)
1992 dev_warn(&hdev->pdev->dev,
1993 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1994 hdev->num_msi, vectors);
1996 hdev->num_msi = vectors;
1997 hdev->num_msi_left = vectors;
1998 hdev->base_msi_vector = pdev->irq;
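/* RoCE vectors follow the NIC vectors at a fixed offset from the base. */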
1999 hdev->roce_base_vector = hdev->base_msi_vector +
2000 HCLGE_ROCE_VECTOR_OFFSET;
2002 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2003 sizeof(u16), GFP_KERNEL);
2004 if (!hdev->vector_status) {
2005 pci_free_irq_vectors(pdev);
2006 return -ENOMEM;
2009 for (i = 0; i < hdev->num_msi; i++)
2010 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2012 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2013 sizeof(int), GFP_KERNEL);
2014 if (!hdev->vector_irq) {
2015 pci_free_irq_vectors(pdev);
2016 return -ENOMEM;
2019 return 0;
2022 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
2024 struct hclge_mac *mac = &hdev->hw.mac;
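/* Only 10M/100M links support half duplex; all faster speeds are forced
* to full duplex. */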
2026 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
2027 mac->duplex = (u8)duplex;
2028 else
2029 mac->duplex = HCLGE_MAC_FULL;
2031 mac->speed = speed;
2034 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2036 struct hclge_config_mac_speed_dup_cmd *req;
2037 struct hclge_desc desc;
2038 int ret;
2040 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2042 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2044 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
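/* Map the requested speed onto the firmware's register encoding. */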
2046 switch (speed) {
2047 case HCLGE_MAC_SPEED_10M:
2048 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2049 HCLGE_CFG_SPEED_S, 6);
2050 break;
2051 case HCLGE_MAC_SPEED_100M:
2052 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2053 HCLGE_CFG_SPEED_S, 7);
2054 break;
2055 case HCLGE_MAC_SPEED_1G:
2056 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2057 HCLGE_CFG_SPEED_S, 0);
2058 break;
2059 case HCLGE_MAC_SPEED_10G:
2060 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2061 HCLGE_CFG_SPEED_S, 1);
2062 break;
2063 case HCLGE_MAC_SPEED_25G:
2064 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2065 HCLGE_CFG_SPEED_S, 2);
2066 break;
2067 case HCLGE_MAC_SPEED_40G:
2068 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2069 HCLGE_CFG_SPEED_S, 3);
2070 break;
2071 case HCLGE_MAC_SPEED_50G:
2072 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2073 HCLGE_CFG_SPEED_S, 4);
2074 break;
2075 case HCLGE_MAC_SPEED_100G:
2076 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2077 HCLGE_CFG_SPEED_S, 5);
2078 break;
2079 default:
2080 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2081 return -EINVAL;
2084 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 1);
2087 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2088 if (ret) {
2089 dev_err(&hdev->pdev->dev,
2090 "mac speed/duplex config cmd failed %d.\n", ret);
2091 return ret;
2094 hclge_check_speed_dup(hdev, duplex, speed);
2096 return 0;
2099 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2100 u8 duplex)
2102 struct hclge_vport *vport = hclge_get_vport(handle);
2103 struct hclge_dev *hdev = vport->back;
2105 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2108 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2109 u8 *duplex)
2111 struct hclge_query_an_speed_dup_cmd *req;
2112 struct hclge_desc desc;
2113 int speed_tmp;
2114 int ret;
2116 req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
2118 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2119 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2120 if (ret) {
2121 dev_err(&hdev->pdev->dev,
2122 "mac speed/autoneg/duplex query cmd failed %d\n",
2123 ret);
2124 return ret;
2127 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2128 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2129 HCLGE_QUERY_SPEED_S);
2131 ret = hclge_parse_speed(speed_tmp, speed);
2132 if (ret) {
2133 dev_err(&hdev->pdev->dev,
2134 "could not parse speed(=%d), %d\n", speed_tmp, ret);
2135 return -EIO;
2138 return 0;
2141 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2143 struct hclge_config_auto_neg_cmd *req;
2144 struct hclge_desc desc;
2145 u32 flag = 0;
2146 int ret;
2148 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2150 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2151 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2152 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2154 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2155 if (ret) {
2156 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2157 ret);
2158 return ret;
2161 return 0;
2164 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2166 struct hclge_vport *vport = hclge_get_vport(handle);
2167 struct hclge_dev *hdev = vport->back;
2169 return hclge_set_autoneg_en(hdev, enable);
2172 static int hclge_get_autoneg(struct hnae3_handle *handle)
2174 struct hclge_vport *vport = hclge_get_vport(handle);
2175 struct hclge_dev *hdev = vport->back;
2176 struct phy_device *phydev = hdev->hw.mac.phydev;
2178 if (phydev)
2179 return phydev->autoneg;
2181 return hdev->hw.mac.autoneg;
2184 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
2185 bool mask_vlan,
2186 u8 *mac_mask)
2188 struct hclge_mac_vlan_mask_entry_cmd *req;
2189 struct hclge_desc desc;
2190 int status;
2192 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
2193 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
2195 hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
2196 mask_vlan ? 1 : 0);
2197 ether_addr_copy(req->mac_mask, mac_mask);
2199 status = hclge_cmd_send(&hdev->hw, &desc, 1);
2200 if (status)
2201 dev_err(&hdev->pdev->dev,
2202 "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
2203 status);
2205 return status;
2208 static int hclge_mac_init(struct hclge_dev *hdev)
2210 struct hclge_mac *mac = &hdev->hw.mac;
2211 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
2212 int ret;
2214 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2215 if (ret) {
2216 dev_err(&hdev->pdev->dev,
2217 "Config mac speed dup fail ret=%d\n", ret);
2218 return ret;
2221 mac->link = 0;
2223 /* Initialize the MTA table work mode */
2224 hdev->accept_mta_mc = true;
2225 hdev->enable_mta = true;
2226 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2228 ret = hclge_set_mta_filter_mode(hdev,
2229 hdev->mta_mac_sel_type,
2230 hdev->enable_mta);
2231 if (ret) {
2232 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2233 ret);
2234 return ret;
2237 ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2238 if (ret) {
2239 dev_err(&hdev->pdev->dev,
2240 "set mta filter mode fail ret=%d\n", ret);
2241 return ret;
2244 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
2245 if (ret)
2246 dev_err(&hdev->pdev->dev,
2247 "set default mac_vlan_mask fail ret=%d\n", ret);
2249 return ret;
2252 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2254 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2255 schedule_work(&hdev->mbx_service_task);
2258 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2260 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2261 schedule_work(&hdev->rst_service_task);
2264 static void hclge_task_schedule(struct hclge_dev *hdev)
2266 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2267 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2268 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2269 (void)schedule_work(&hdev->service_task);
2272 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2274 struct hclge_link_status_cmd *req;
2275 struct hclge_desc desc;
2276 int link_status;
2277 int ret;
2279 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2280 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2281 if (ret) {
2282 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2283 ret);
2284 return ret;
2287 req = (struct hclge_link_status_cmd *)desc.data;
2288 link_status = req->status & HCLGE_LINK_STATUS;
2290 return !!link_status;
2293 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2295 int mac_state;
2296 int link_stat;
2298 mac_state = hclge_get_mac_link_status(hdev);
2300 if (hdev->hw.mac.phydev) {
2301 if (!genphy_read_status(hdev->hw.mac.phydev))
2302 link_stat = mac_state &
2303 hdev->hw.mac.phydev->link;
2304 else
2305 link_stat = 0;
2307 } else {
2308 link_stat = mac_state;
2311 return !!link_stat;
2314 static void hclge_update_link_status(struct hclge_dev *hdev)
2316 struct hnae3_client *client = hdev->nic_client;
2317 struct hnae3_handle *handle;
2318 int state;
2319 int i;
2321 if (!client)
2322 return;
2323 state = hclge_get_mac_phy_link(hdev);
2324 if (state != hdev->hw.mac.link) {
2325 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2326 handle = &hdev->vport[i].nic;
2327 client->ops->link_status_change(handle, state);
2329 hdev->hw.mac.link = state;
2333 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2335 struct hclge_mac mac = hdev->hw.mac;
2336 u8 duplex;
2337 int speed;
2338 int ret;
2340 /* get the speed and duplex as autoneg's result from mac cmd when phy
2341 * doesn't exist.
2343 if (mac.phydev || !mac.autoneg)
2344 return 0;
2346 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2347 if (ret) {
2348 dev_err(&hdev->pdev->dev,
2349 "mac autoneg/speed/duplex query failed %d\n", ret);
2350 return ret;
2353 if ((mac.speed != speed) || (mac.duplex != duplex)) {
2354 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2355 if (ret) {
2356 dev_err(&hdev->pdev->dev,
2357 "mac speed/duplex config failed %d\n", ret);
2358 return ret;
2362 return 0;
2365 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2367 struct hclge_vport *vport = hclge_get_vport(handle);
2368 struct hclge_dev *hdev = vport->back;
2370 return hclge_update_speed_duplex(hdev);
2373 static int hclge_get_status(struct hnae3_handle *handle)
2375 struct hclge_vport *vport = hclge_get_vport(handle);
2376 struct hclge_dev *hdev = vport->back;
2378 hclge_update_link_status(hdev);
2380 return hdev->hw.mac.link;
2383 static void hclge_service_timer(struct timer_list *t)
2385 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2387 mod_timer(&hdev->service_timer, jiffies + HZ);
2388 hdev->hw_stats.stats_timer++;
2389 hclge_task_schedule(hdev);
2392 static void hclge_service_complete(struct hclge_dev *hdev)
2394 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2396 /* Flush memory before next watchdog */
2397 smp_mb__before_atomic();
2398 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2401 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2403 u32 rst_src_reg;
2404 u32 cmdq_src_reg;
2406 /* fetch the events from their corresponding regs */
2407 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
2408 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2410 /* Assumption: If by any chance reset and mailbox events are reported
2411 * together then we will only process reset event in this go and will
2412 * defer the processing of the mailbox events. Since we will not have
2413 * cleared the RX CMDQ event this time, we will receive another
2414 * interrupt from H/W just for the mailbox.
2417 /* check for vector0 reset event sources */
2418 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2419 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2420 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2421 return HCLGE_VECTOR0_EVENT_RST;
2424 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2425 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2426 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2427 return HCLGE_VECTOR0_EVENT_RST;
2430 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2431 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2432 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2433 return HCLGE_VECTOR0_EVENT_RST;
2436 /* check for vector0 mailbox(=CMDQ RX) event source */
2437 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2438 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2439 *clearval = cmdq_src_reg;
2440 return HCLGE_VECTOR0_EVENT_MBX;
2443 return HCLGE_VECTOR0_EVENT_OTHER;
2446 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2447 u32 regclr)
2449 switch (event_type) {
2450 case HCLGE_VECTOR0_EVENT_RST:
2451 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2452 break;
2453 case HCLGE_VECTOR0_EVENT_MBX:
2454 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2455 break;
2459 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2461 writel(enable ? 1 : 0, vector->addr);
2464 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2466 struct hclge_dev *hdev = data;
2467 u32 event_cause;
2468 u32 clearval;
2470 hclge_enable_vector(&hdev->misc_vector, false);
2471 event_cause = hclge_check_event_cause(hdev, &clearval);
2473 /* vector 0 interrupt is shared with reset and mailbox source events. */
2474 switch (event_cause) {
2475 case HCLGE_VECTOR0_EVENT_RST:
2476 hclge_reset_task_schedule(hdev);
2477 break;
2478 case HCLGE_VECTOR0_EVENT_MBX:
2479 /* If we are here then,
2480 * 1. Either we are not handling any mbx task and we are not
2481 * scheduled as well
2482 * OR
2483 * 2. We could be handling a mbx task but nothing more is
2484 * scheduled.
2485 * In both cases, we should schedule mbx task as there are more
2486 * mbx messages reported by this interrupt.
2488 hclge_mbx_task_schedule(hdev);
break;
2490 default:
2491 dev_dbg(&hdev->pdev->dev,
2492 "received unknown or unhandled event of vector0\n");
2493 break;
2496 /* we should clear the source of interrupt */
2497 hclge_clear_event_cause(hdev, event_cause, clearval);
2498 hclge_enable_vector(&hdev->misc_vector, true);
2500 return IRQ_HANDLED;
2503 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2505 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2506 hdev->num_msi_left += 1;
2507 hdev->num_msi_used -= 1;
2510 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2512 struct hclge_misc_vector *vector = &hdev->misc_vector;
2514 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2516 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2517 hdev->vector_status[0] = 0;
2519 hdev->num_msi_left -= 1;
2520 hdev->num_msi_used += 1;
2523 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2525 int ret;
2527 hclge_get_misc_vector(hdev);
2529 /* this would be explicitly freed in the end */
2530 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2531 0, "hclge_misc", hdev);
2532 if (ret) {
2533 hclge_free_vector(hdev, 0);
2534 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2535 hdev->misc_vector.vector_irq);
2538 return ret;
2541 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2543 free_irq(hdev->misc_vector.vector_irq, hdev);
2544 hclge_free_vector(hdev, 0);
2547 static int hclge_notify_client(struct hclge_dev *hdev,
2548 enum hnae3_reset_notify_type type)
2550 struct hnae3_client *client = hdev->nic_client;
2551 u16 i;
2553 if (!client->ops->reset_notify)
2554 return -EOPNOTSUPP;
2556 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2557 struct hnae3_handle *handle = &hdev->vport[i].nic;
2558 int ret;
2560 ret = client->ops->reset_notify(handle, type);
2561 if (ret)
2562 return ret;
2565 return 0;
2568 static int hclge_reset_wait(struct hclge_dev *hdev)
2570 #define HCLGE_RESET_WAIT_MS 100
2571 #define HCLGE_RESET_WAIT_CNT 5
2572 u32 val, reg, reg_bit;
2573 u32 cnt = 0;
2575 switch (hdev->reset_type) {
2576 case HNAE3_GLOBAL_RESET:
2577 reg = HCLGE_GLOBAL_RESET_REG;
2578 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2579 break;
2580 case HNAE3_CORE_RESET:
2581 reg = HCLGE_GLOBAL_RESET_REG;
2582 reg_bit = HCLGE_CORE_RESET_BIT;
2583 break;
2584 case HNAE3_FUNC_RESET:
2585 reg = HCLGE_FUN_RST_ING;
2586 reg_bit = HCLGE_FUN_RST_ING_B;
2587 break;
2588 default:
2589 dev_err(&hdev->pdev->dev,
2590 "Wait for unsupported reset type: %d\n",
2591 hdev->reset_type);
2592 return -EINVAL;
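/* Poll the reset status bit; with 5 attempts at 100 ms each this waits
* up to 500 ms for hardware to finish. */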
2595 val = hclge_read_dev(&hdev->hw, reg);
2596 while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2597 msleep(HCLGE_RESET_WAIT_MS);
2598 val = hclge_read_dev(&hdev->hw, reg);
2599 cnt++;
2602 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2603 dev_warn(&hdev->pdev->dev,
2604 "Wait for reset timeout: %d\n", hdev->reset_type);
2605 return -EBUSY;
2608 return 0;
2611 static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2613 struct hclge_desc desc;
2614 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2615 int ret;
2617 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2618 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
2619 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2620 req->fun_reset_vfid = func_id;
2622 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2623 if (ret)
2624 dev_err(&hdev->pdev->dev,
2625 "send function reset cmd fail, status =%d\n", ret);
2627 return ret;
2630 static void hclge_do_reset(struct hclge_dev *hdev)
2632 struct pci_dev *pdev = hdev->pdev;
2633 u32 val;
2635 switch (hdev->reset_type) {
2636 case HNAE3_GLOBAL_RESET:
2637 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2638 hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2639 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2640 dev_info(&pdev->dev, "Global Reset requested\n");
2641 break;
2642 case HNAE3_CORE_RESET:
2643 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2644 hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2645 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2646 dev_info(&pdev->dev, "Core Reset requested\n");
2647 break;
2648 case HNAE3_FUNC_RESET:
2649 dev_info(&pdev->dev, "PF Reset requested\n");
2650 hclge_func_reset_cmd(hdev, 0);
2651 /* schedule again to check later */
2652 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2653 hclge_reset_task_schedule(hdev);
2654 break;
2655 default:
2656 dev_warn(&pdev->dev,
2657 "Unsupported reset type: %d\n", hdev->reset_type);
2658 break;
2662 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2663 unsigned long *addr)
2665 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2667 /* return the highest priority reset level amongst all */
2668 if (test_bit(HNAE3_GLOBAL_RESET, addr))
2669 rst_level = HNAE3_GLOBAL_RESET;
2670 else if (test_bit(HNAE3_CORE_RESET, addr))
2671 rst_level = HNAE3_CORE_RESET;
2672 else if (test_bit(HNAE3_IMP_RESET, addr))
2673 rst_level = HNAE3_IMP_RESET;
2674 else if (test_bit(HNAE3_FUNC_RESET, addr))
2675 rst_level = HNAE3_FUNC_RESET;
2677 /* now, clear all other resets */
2678 clear_bit(HNAE3_GLOBAL_RESET, addr);
2679 clear_bit(HNAE3_CORE_RESET, addr);
2680 clear_bit(HNAE3_IMP_RESET, addr);
2681 clear_bit(HNAE3_FUNC_RESET, addr);
2683 return rst_level;
2686 static void hclge_reset(struct hclge_dev *hdev)
2688 /* perform reset of the stack & ae device for a client */
2690 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2692 if (!hclge_reset_wait(hdev)) {
2693 rtnl_lock();
2694 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2695 hclge_reset_ae_dev(hdev->ae_dev);
2696 hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2697 rtnl_unlock();
2698 } else {
2699 /* schedule again to check pending resets later */
2700 set_bit(hdev->reset_type, &hdev->reset_pending);
2701 hclge_reset_task_schedule(hdev);
2704 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2707 static void hclge_reset_event(struct hnae3_handle *handle,
2708 enum hnae3_reset_type reset)
2710 struct hclge_vport *vport = hclge_get_vport(handle);
2711 struct hclge_dev *hdev = vport->back;
2713 dev_info(&hdev->pdev->dev,
2714 "Receive reset event , reset_type is %d", reset);
2716 switch (reset) {
2717 case HNAE3_FUNC_RESET:
2718 case HNAE3_CORE_RESET:
2719 case HNAE3_GLOBAL_RESET:
2720 /* request reset & schedule reset task */
2721 set_bit(reset, &hdev->reset_request);
2722 hclge_reset_task_schedule(hdev);
2723 break;
2724 default:
2725 dev_warn(&hdev->pdev->dev, "Unsupported reset event: %d\n", reset);
2726 break;
2730 static void hclge_reset_subtask(struct hclge_dev *hdev)
2732 /* check if there is any ongoing reset in the hardware. This status can
2733 * be checked from reset_pending. If there is one, we need to wait for
2734 * the hardware to complete the reset.
2735 * a. If we can determine in reasonable time that the hardware
2736 * has fully reset, we proceed with the driver and client
2737 * reset.
2738 * b. Otherwise, we come back later to check the status, so
2739 * reschedule now.
2741 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2742 if (hdev->reset_type != HNAE3_NONE_RESET)
2743 hclge_reset(hdev);
2745 /* check if we got any *new* reset requests to be honored */
2746 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2747 if (hdev->reset_type != HNAE3_NONE_RESET)
2748 hclge_do_reset(hdev);
2750 hdev->reset_type = HNAE3_NONE_RESET;
2753 static void hclge_reset_service_task(struct work_struct *work)
2755 struct hclge_dev *hdev =
2756 container_of(work, struct hclge_dev, rst_service_task);
2758 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2759 return;
2761 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2763 hclge_reset_subtask(hdev);
2765 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2768 static void hclge_mailbox_service_task(struct work_struct *work)
2770 struct hclge_dev *hdev =
2771 container_of(work, struct hclge_dev, mbx_service_task);
2773 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2774 return;
2776 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2778 hclge_mbx_handler(hdev);
2780 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2783 static void hclge_service_task(struct work_struct *work)
2785 struct hclge_dev *hdev =
2786 container_of(work, struct hclge_dev, service_task);
2788 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2789 hclge_update_stats_for_all(hdev);
2790 hdev->hw_stats.stats_timer = 0;
2793 hclge_update_speed_duplex(hdev);
2794 hclge_update_link_status(hdev);
2795 hclge_service_complete(hdev);
2798 static void hclge_disable_sriov(struct hclge_dev *hdev)
2800 /* If our VFs are assigned we cannot shut down SR-IOV
2801 * without causing issues, so just leave the hardware
2802 * available but disabled
2804 if (pci_vfs_assigned(hdev->pdev)) {
2805 dev_warn(&hdev->pdev->dev,
2806 "disabling driver while VFs are assigned\n");
2807 return;
2810 pci_disable_sriov(hdev->pdev);
2813 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2815 /* VF handle has no client */
2816 if (!handle->client)
2817 return container_of(handle, struct hclge_vport, nic);
2818 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2819 return container_of(handle, struct hclge_vport, roce);
2820 else
2821 return container_of(handle, struct hclge_vport, nic);
2824 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2825 struct hnae3_vector_info *vector_info)
2827 struct hclge_vport *vport = hclge_get_vport(handle);
2828 struct hnae3_vector_info *vector = vector_info;
2829 struct hclge_dev *hdev = vport->back;
2830 int alloc = 0;
2831 int i, j;
2833 vector_num = min(hdev->num_msi_left, vector_num);
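/* Vector 0 is reserved for the misc interrupt, so the scan for free
* vectors starts at index 1. */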
2835 for (j = 0; j < vector_num; j++) {
2836 for (i = 1; i < hdev->num_msi; i++) {
2837 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2838 vector->vector = pci_irq_vector(hdev->pdev, i);
2839 vector->io_addr = hdev->hw.io_base +
2840 HCLGE_VECTOR_REG_BASE +
2841 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2842 vport->vport_id *
2843 HCLGE_VECTOR_VF_OFFSET;
2844 hdev->vector_status[i] = vport->vport_id;
2845 hdev->vector_irq[i] = vector->vector;
2847 vector++;
2848 alloc++;
2850 break;
2854 hdev->num_msi_left -= alloc;
2855 hdev->num_msi_used += alloc;
2857 return alloc;
2860 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2862 int i;
2864 for (i = 0; i < hdev->num_msi; i++)
2865 if (vector == hdev->vector_irq[i])
2866 return i;
2868 return -EINVAL;
2871 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2873 return HCLGE_RSS_KEY_SIZE;
2876 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2878 return HCLGE_RSS_IND_TBL_SIZE;
2881 static int hclge_get_rss_algo(struct hclge_dev *hdev)
2883 struct hclge_rss_config_cmd *req;
2884 struct hclge_desc desc;
2885 int rss_hash_algo;
2886 int ret;
2888 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
2890 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2891 if (ret) {
2892 dev_err(&hdev->pdev->dev,
2893 "Get link status error, status =%d\n", ret);
2894 return ret;
2897 req = (struct hclge_rss_config_cmd *)desc.data;
2898 rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
2900 if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
2901 return ETH_RSS_HASH_TOP;
2903 return -EINVAL;
2906 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2907 const u8 hfunc, const u8 *key)
2909 struct hclge_rss_config_cmd *req;
2910 struct hclge_desc desc;
2911 int key_offset;
2912 int key_size;
2913 int ret;
2915 req = (struct hclge_rss_config_cmd *)desc.data;
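/* The RSS key does not fit in a single descriptor, so it is written in
* three chunks of HCLGE_RSS_HASH_KEY_NUM bytes, with the remainder going
* into the last chunk. */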
2917 for (key_offset = 0; key_offset < 3; key_offset++) {
2918 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2919 false);
2921 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2922 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2924 if (key_offset == 2)
2925 key_size =
2926 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2927 else
2928 key_size = HCLGE_RSS_HASH_KEY_NUM;
2930 memcpy(req->hash_key,
2931 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2933 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2934 if (ret) {
2935 dev_err(&hdev->pdev->dev,
2936 "Configure RSS config fail, status = %d\n",
2937 ret);
2938 return ret;
2941 return 0;
2944 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
2946 struct hclge_rss_indirection_table_cmd *req;
2947 struct hclge_desc desc;
2948 int i, j;
2949 int ret;
2951 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
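/* The indirection table is programmed HCLGE_RSS_CFG_TBL_SIZE entries
* per descriptor. */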
2953 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2954 hclge_cmd_setup_basic_desc
2955 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
2957 req->start_table_index =
2958 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
2959 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
2961 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2962 req->rss_result[j] =
2963 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2965 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2966 if (ret) {
2967 dev_err(&hdev->pdev->dev,
2968 "Configure rss indir table fail,status = %d\n",
2969 ret);
2970 return ret;
2973 return 0;
2976 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2977 u16 *tc_size, u16 *tc_offset)
2979 struct hclge_rss_tc_mode_cmd *req;
2980 struct hclge_desc desc;
2981 int ret;
2982 int i;
2984 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2985 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
2987 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2988 u16 mode = 0;
2990 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
2991 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
2992 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2993 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
2994 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2996 req->rss_tc_mode[i] = cpu_to_le16(mode);
2999 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3000 if (ret) {
3001 dev_err(&hdev->pdev->dev,
3002 "Configure rss tc mode fail, status = %d\n", ret);
3003 return ret;
3006 return 0;
3009 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3011 struct hclge_rss_input_tuple_cmd *req;
3012 struct hclge_desc desc;
3013 int ret;
3015 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3017 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
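/* TCP/UDP and fragmented flows hash on the default input tuple; SCTP
* flows use the SCTP-specific tuple. */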
3018 req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3019 req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3020 req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
3021 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3022 req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3023 req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3024 req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
3025 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3026 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3027 if (ret) {
3028 dev_err(&hdev->pdev->dev,
3029 "Configure rss input fail, status = %d\n", ret);
3030 return ret;
3033 return 0;
3036 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3037 u8 *key, u8 *hfunc)
3039 struct hclge_vport *vport = hclge_get_vport(handle);
3040 struct hclge_dev *hdev = vport->back;
3041 int i;
3043 /* Get hash algorithm */
3044 if (hfunc)
3045 *hfunc = hclge_get_rss_algo(hdev);
3047 /* Get the RSS Key required by the user */
3048 if (key)
3049 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3051 /* Get indirect table */
3052 if (indir)
3053 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3054 indir[i] = vport->rss_indirection_tbl[i];
3056 return 0;
3059 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3060 const u8 *key, const u8 hfunc)
3062 struct hclge_vport *vport = hclge_get_vport(handle);
3063 struct hclge_dev *hdev = vport->back;
3064 u8 hash_algo;
3065 int ret, i;
3067 /* Set the RSS Hash Key if specified by the user */
3068 if (key) {
3069 /* Update the shadow RSS key with the user-specified key */
3070 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3072 if (hfunc == ETH_RSS_HASH_TOP ||
3073 hfunc == ETH_RSS_HASH_NO_CHANGE)
3074 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3075 else
3076 return -EINVAL;
3077 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3078 if (ret)
3079 return ret;
3082 /* Update the shadow RSS table with user specified qids */
3083 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3084 vport->rss_indirection_tbl[i] = indir[i];
3086 /* Update the hardware */
3087 ret = hclge_set_rss_indir_table(hdev, indir);
3088 return ret;
3091 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3093 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3095 if (nfc->data & RXH_L4_B_2_3)
3096 hash_sets |= HCLGE_D_PORT_BIT;
3097 else
3098 hash_sets &= ~HCLGE_D_PORT_BIT;
3100 if (nfc->data & RXH_IP_SRC)
3101 hash_sets |= HCLGE_S_IP_BIT;
3102 else
3103 hash_sets &= ~HCLGE_S_IP_BIT;
3105 if (nfc->data & RXH_IP_DST)
3106 hash_sets |= HCLGE_D_IP_BIT;
3107 else
3108 hash_sets &= ~HCLGE_D_IP_BIT;
3110 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3111 hash_sets |= HCLGE_V_TAG_BIT;
3113 return hash_sets;
3116 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3117 struct ethtool_rxnfc *nfc)
3119 struct hclge_vport *vport = hclge_get_vport(handle);
3120 struct hclge_dev *hdev = vport->back;
3121 struct hclge_rss_input_tuple_cmd *req;
3122 struct hclge_desc desc;
3123 u8 tuple_sets;
3124 int ret;
3126 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3127 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3128 return -EINVAL;
3130 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
3132 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3133 if (ret) {
3134 dev_err(&hdev->pdev->dev,
3135 "Read rss tuple fail, status = %d\n", ret);
3136 return ret;
3139 hclge_cmd_reuse_desc(&desc, false);
3141 tuple_sets = hclge_get_rss_hash_bits(nfc);
3142 switch (nfc->flow_type) {
3143 case TCP_V4_FLOW:
3144 req->ipv4_tcp_en = tuple_sets;
3145 break;
3146 case TCP_V6_FLOW:
3147 req->ipv6_tcp_en = tuple_sets;
3148 break;
3149 case UDP_V4_FLOW:
3150 req->ipv4_udp_en = tuple_sets;
3151 break;
3152 case UDP_V6_FLOW:
3153 req->ipv6_udp_en = tuple_sets;
3154 break;
3155 case SCTP_V4_FLOW:
3156 req->ipv4_sctp_en = tuple_sets;
3157 break;
3158 case SCTP_V6_FLOW:
3159 if ((nfc->data & RXH_L4_B_0_1) ||
3160 (nfc->data & RXH_L4_B_2_3))
3161 return -EINVAL;
3163 req->ipv6_sctp_en = tuple_sets;
3164 break;
3165 case IPV4_FLOW:
3166 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3167 break;
3168 case IPV6_FLOW:
3169 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3170 break;
3171 default:
3172 return -EINVAL;
3175 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3176 if (ret)
3177 dev_err(&hdev->pdev->dev,
3178 "Set rss tuple fail, status = %d\n", ret);
3180 return ret;
3183 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3184 struct ethtool_rxnfc *nfc)
3186 struct hclge_vport *vport = hclge_get_vport(handle);
3187 struct hclge_dev *hdev = vport->back;
3188 struct hclge_rss_input_tuple_cmd *req;
3189 struct hclge_desc desc;
3190 u8 tuple_sets;
3191 int ret;
3193 nfc->data = 0;
3195 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3196 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
3197 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3198 if (ret) {
3199 dev_err(&hdev->pdev->dev,
3200 "Read rss tuple fail, status = %d\n", ret);
3201 return ret;
3204 switch (nfc->flow_type) {
3205 case TCP_V4_FLOW:
3206 tuple_sets = req->ipv4_tcp_en;
3207 break;
3208 case UDP_V4_FLOW:
3209 tuple_sets = req->ipv4_udp_en;
3210 break;
3211 case TCP_V6_FLOW:
3212 tuple_sets = req->ipv6_tcp_en;
3213 break;
3214 case UDP_V6_FLOW:
3215 tuple_sets = req->ipv6_udp_en;
3216 break;
3217 case SCTP_V4_FLOW:
3218 tuple_sets = req->ipv4_sctp_en;
3219 break;
3220 case SCTP_V6_FLOW:
3221 tuple_sets = req->ipv6_sctp_en;
3222 break;
3223 case IPV4_FLOW:
3224 case IPV6_FLOW:
3225 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3226 break;
3227 default:
3228 return -EINVAL;
3231 if (!tuple_sets)
3232 return 0;
3234 if (tuple_sets & HCLGE_D_PORT_BIT)
3235 nfc->data |= RXH_L4_B_2_3;
3236 if (tuple_sets & HCLGE_S_PORT_BIT)
3237 nfc->data |= RXH_L4_B_0_1;
3238 if (tuple_sets & HCLGE_D_IP_BIT)
3239 nfc->data |= RXH_IP_DST;
3240 if (tuple_sets & HCLGE_S_IP_BIT)
3241 nfc->data |= RXH_IP_SRC;
3243 return 0;
3246 static int hclge_get_tc_size(struct hnae3_handle *handle)
3248 struct hclge_vport *vport = hclge_get_vport(handle);
3249 struct hclge_dev *hdev = vport->back;
3251 return hdev->rss_size_max;
3254 int hclge_rss_init_hw(struct hclge_dev *hdev)
3256 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3257 struct hclge_vport *vport = hdev->vport;
3258 u16 tc_offset[HCLGE_MAX_TC_NUM];
3259 u8 rss_key[HCLGE_RSS_KEY_SIZE];
3260 u16 tc_valid[HCLGE_MAX_TC_NUM];
3261 u16 tc_size[HCLGE_MAX_TC_NUM];
3262 u32 *rss_indir = NULL;
3263 u16 rss_size = 0, roundup_size;
3264 const u8 *key;
3265 int i, ret, j;
3267 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
3268 if (!rss_indir)
3269 return -ENOMEM;
3271 /* Get default RSS key */
3272 netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
3274 /* Initialize RSS indirect table for each vport */
3275 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3276 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
3277 vport[j].rss_indirection_tbl[i] =
3278 i % vport[j].alloc_rss_size;
3280 /* vport 0 is for PF */
3281 if (j != 0)
3282 continue;
3284 rss_size = vport[j].alloc_rss_size;
3285 rss_indir[i] = vport[j].rss_indirection_tbl[i];
3288 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3289 if (ret)
3290 goto err;
3292 key = rss_key;
3293 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3294 if (ret)
3295 goto err;
3297 ret = hclge_set_rss_input_tuple(hdev);
3298 if (ret)
3299 goto err;
3301 /* Each TC has the same queue size. The tc_size written to hardware is
3302 * the log2 of rss_size rounded up to a power of two; the actual queue
3303 * size is limited by the indirection table.
3305 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3306 dev_err(&hdev->pdev->dev,
3307 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3308 rss_size);
3309 ret = -EINVAL;
3310 goto err;
3313 roundup_size = roundup_pow_of_two(rss_size);
3314 roundup_size = ilog2(roundup_size);
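/* e.g. an rss_size of 24 rounds up to 32, so the tc_size written is
* ilog2(32) = 5. */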
3316 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3317 tc_valid[i] = 0;
3319 if (!(hdev->hw_tc_map & BIT(i)))
3320 continue;
3322 tc_valid[i] = 1;
3323 tc_size[i] = roundup_size;
3324 tc_offset[i] = rss_size * i;
3327 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3329 err:
3330 kfree(rss_indir);
3332 return ret;
3335 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3336 int vector_id, bool en,
3337 struct hnae3_ring_chain_node *ring_chain)
3339 struct hclge_dev *hdev = vport->back;
3340 struct hnae3_ring_chain_node *node;
3341 struct hclge_desc desc;
3342 struct hclge_ctrl_vector_chain_cmd *req
3343 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3344 enum hclge_cmd_status status;
3345 enum hclge_opcode_type op;
3346 u16 tqp_type_and_id;
3347 int i;
3349 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3350 hclge_cmd_setup_basic_desc(&desc, op, false);
3351 req->int_vector_id = vector_id;
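/* Walk the ring chain, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD
* ring-to-vector bindings per descriptor and sending a command whenever
* one fills up; any remainder is flushed after the loop. */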
3353 i = 0;
3354 for (node = ring_chain; node; node = node->next) {
3355 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3356 hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3357 HCLGE_INT_TYPE_S,
3358 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3359 hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3360 HCLGE_TQP_ID_S, node->tqp_index);
3361 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3362 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3363 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3364 req->vfid = vport->vport_id;
3366 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3367 if (status) {
3368 dev_err(&hdev->pdev->dev,
3369 "Map TQP fail, status is %d.\n",
3370 status);
3371 return -EIO;
3373 i = 0;
3375 hclge_cmd_setup_basic_desc(&desc,
3376 op,
3377 false);
3378 req->int_vector_id = vector_id;
3382 if (i > 0) {
3383 req->int_cause_num = i;
3384 req->vfid = vport->vport_id;
3385 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3386 if (status) {
3387 dev_err(&hdev->pdev->dev,
3388 "Map TQP fail, status is %d.\n", status);
3389 return -EIO;
3393 return 0;
3396 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3397 int vector,
3398 struct hnae3_ring_chain_node *ring_chain)
3400 struct hclge_vport *vport = hclge_get_vport(handle);
3401 struct hclge_dev *hdev = vport->back;
3402 int vector_id;
3404 vector_id = hclge_get_vector_index(hdev, vector);
3405 if (vector_id < 0) {
3406 dev_err(&hdev->pdev->dev,
3407 "Get vector index fail. vector_id =%d\n", vector_id);
3408 return vector_id;
3411 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3414 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3415 int vector,
3416 struct hnae3_ring_chain_node *ring_chain)
3418 struct hclge_vport *vport = hclge_get_vport(handle);
3419 struct hclge_dev *hdev = vport->back;
3420 int vector_id, ret;
3422 vector_id = hclge_get_vector_index(hdev, vector);
3423 if (vector_id < 0) {
3424 dev_err(&handle->pdev->dev,
3425 "Get vector index fail. ret =%d\n", vector_id);
3426 return vector_id;
3429 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3430 if (ret) {
3431 dev_err(&handle->pdev->dev,
3432 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3433 vector_id,
3434 ret);
3435 return ret;
3438 /* Free this MSIX or MSI vector */
3439 hclge_free_vector(hdev, vector_id);
3441 return 0;
3444 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3445 struct hclge_promisc_param *param)
3447 struct hclge_promisc_cfg_cmd *req;
3448 struct hclge_desc desc;
3449 int ret;
3451 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3453 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3454 req->vf_id = param->vf_id;
3455 req->flag = (param->enable << HCLGE_PROMISC_EN_B);
3457 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3458 if (ret) {
3459 dev_err(&hdev->pdev->dev,
3460 "Set promisc mode fail, status is %d.\n", ret);
3461 return ret;
3463 return 0;
3466 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3467 bool en_mc, bool en_bc, int vport_id)
3469 if (!param)
3470 return;
3472 memset(param, 0, sizeof(struct hclge_promisc_param));
3473 if (en_uc)
3474 param->enable = HCLGE_PROMISC_EN_UC;
3475 if (en_mc)
3476 param->enable |= HCLGE_PROMISC_EN_MC;
3477 if (en_bc)
3478 param->enable |= HCLGE_PROMISC_EN_BC;
3479 param->vf_id = vport_id;
3482 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
3484 struct hclge_vport *vport = hclge_get_vport(handle);
3485 struct hclge_dev *hdev = vport->back;
3486 struct hclge_promisc_param param;
3488 hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
3489 hclge_cmd_set_promisc_mode(hdev, &param);
3492 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
3494 struct hclge_desc desc;
3495 struct hclge_config_mac_mode_cmd *req =
3496 (struct hclge_config_mac_mode_cmd *)desc.data;
3497 u32 loop_en = 0;
3498 int ret;
3500 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
3501 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
3502 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
3503 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
3504 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
3505 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
3506 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
3507 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3508 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
3509 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
3510 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
3511 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
3512 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
3513 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
3514 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
3515 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3517 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3518 if (ret)
3519 dev_err(&hdev->pdev->dev,
3520 "mac enable fail, ret =%d.\n", ret);
3523 static int hclge_set_loopback(struct hnae3_handle *handle,
3524 enum hnae3_loop loop_mode, bool en)
3526 struct hclge_vport *vport = hclge_get_vport(handle);
3527 struct hclge_config_mac_mode_cmd *req;
3528 struct hclge_dev *hdev = vport->back;
3529 struct hclge_desc desc;
3530 u32 loop_en;
3531 int ret;
3533 switch (loop_mode) {
3534 case HNAE3_MAC_INTER_LOOP_MAC:
3535 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
3536 /* 1 Read out the MAC mode config at first */
3537 hclge_cmd_setup_basic_desc(&desc,
3538 HCLGE_OPC_CONFIG_MAC_MODE,
3539 true);
3540 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3541 if (ret) {
3542 dev_err(&hdev->pdev->dev,
3543 "mac loopback get fail, ret =%d.\n",
3544 ret);
3545 return ret;
3548 /* 2 Then setup the loopback flag */
3549 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
3550 if (en)
3551 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
3552 else
3553 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3555 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3557 /* 3 Config mac work mode with loopback flag
3558 * and its original configure parameters
3560 hclge_cmd_reuse_desc(&desc, false);
3561 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3562 if (ret)
3563 dev_err(&hdev->pdev->dev,
3564 "mac loopback set fail, ret =%d.\n", ret);
3565 break;
3566 default:
3567 ret = -EOPNOTSUPP;
3568 dev_err(&hdev->pdev->dev,
3569 "loop_mode %d is not supported\n", loop_mode);
3570 break;
3573 return ret;
3576 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
3577 int stream_id, bool enable)
3579 struct hclge_desc desc;
3580 struct hclge_cfg_com_tqp_queue_cmd *req =
3581 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
3582 int ret;
3584 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
3585 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
3586 req->stream_id = cpu_to_le16(stream_id);
3587 req->enable |= enable << HCLGE_TQP_ENABLE_B;
3589 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3590 if (ret)
3591 dev_err(&hdev->pdev->dev,
3592 "Tqp enable fail, status =%d.\n", ret);
3593 return ret;
3596 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
3598 struct hclge_vport *vport = hclge_get_vport(handle);
3599 struct hnae3_queue *queue;
3600 struct hclge_tqp *tqp;
3601 int i;
3603 for (i = 0; i < vport->alloc_tqps; i++) {
3604 queue = handle->kinfo.tqp[i];
3605 tqp = container_of(queue, struct hclge_tqp, q);
3606 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
3610 static int hclge_ae_start(struct hnae3_handle *handle)
3612 struct hclge_vport *vport = hclge_get_vport(handle);
3613 struct hclge_dev *hdev = vport->back;
3614 int i, queue_id, ret;
3616 for (i = 0; i < vport->alloc_tqps; i++) {
3617 /* todo clear interrupt */
3618 /* ring enable */
3619 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3620 if (queue_id < 0) {
3621 dev_warn(&hdev->pdev->dev,
3622 "Get invalid queue id, ignore it\n");
3623 continue;
3626 hclge_tqp_enable(hdev, queue_id, 0, true);
3628 /* mac enable */
3629 hclge_cfg_mac_mode(hdev, true);
3630 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
3631 mod_timer(&hdev->service_timer, jiffies + HZ);
3633 ret = hclge_mac_start_phy(hdev);
3634 if (ret)
3635 return ret;
3637 /* reset tqp stats */
3638 hclge_reset_tqp_stats(handle);
3640 return 0;
3643 static void hclge_ae_stop(struct hnae3_handle *handle)
3645 struct hclge_vport *vport = hclge_get_vport(handle);
3646 struct hclge_dev *hdev = vport->back;
3647 int i, queue_id;
3649 for (i = 0; i < vport->alloc_tqps; i++) {
3650 /* Ring disable */
3651 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3652 if (queue_id < 0) {
3653 dev_warn(&hdev->pdev->dev,
3654 "Get invalid queue id, ignore it\n");
3655 continue;
3658 hclge_tqp_enable(hdev, queue_id, 0, false);
3660 /* Mac disable */
3661 hclge_cfg_mac_mode(hdev, false);
3663 hclge_mac_stop_phy(hdev);
3665 /* reset tqp stats */
3666 hclge_reset_tqp_stats(handle);
3669 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
3670 u16 cmdq_resp, u8 resp_code,
3671 enum hclge_mac_vlan_tbl_opcode op)
3673 struct hclge_dev *hdev = vport->back;
3674 int return_status = -EIO;
3676 if (cmdq_resp) {
3677 dev_err(&hdev->pdev->dev,
3678 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
3679 cmdq_resp);
3680 return -EIO;
3683 if (op == HCLGE_MAC_VLAN_ADD) {
3684 if ((!resp_code) || (resp_code == 1)) {
3685 return_status = 0;
3686 } else if (resp_code == 2) {
3687 return_status = -EIO;
3688 dev_err(&hdev->pdev->dev,
3689 "add mac addr failed for uc_overflow.\n");
3690 } else if (resp_code == 3) {
3691 return_status = -EIO;
3692 dev_err(&hdev->pdev->dev,
3693 "add mac addr failed for mc_overflow.\n");
3694 } else {
3695 dev_err(&hdev->pdev->dev,
3696 "add mac addr failed for undefined, code=%d.\n",
3697 resp_code);
3699 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3700 if (!resp_code) {
3701 return_status = 0;
3702 } else if (resp_code == 1) {
3703 return_status = -EIO;
3704 dev_dbg(&hdev->pdev->dev,
3705 "remove mac addr failed for miss.\n");
3706 } else {
3707 dev_err(&hdev->pdev->dev,
3708 "remove mac addr failed for undefined, code=%d.\n",
3709 resp_code);
3711 } else if (op == HCLGE_MAC_VLAN_LKUP) {
3712 if (!resp_code) {
3713 return_status = 0;
3714 } else if (resp_code == 1) {
3715 return_status = -EIO;
3716 dev_dbg(&hdev->pdev->dev,
3717 "lookup mac addr failed for miss.\n");
3718 } else {
3719 dev_err(&hdev->pdev->dev,
3720 "lookup mac addr failed for undefined, code=%d.\n",
3721 resp_code);
3723 } else {
3724 return_status = -EIO;
3725 dev_err(&hdev->pdev->dev,
3726 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3727 op);
3730 return return_status;
3733 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3735 int word_num;
3736 int bit_num;
3738 if (vfid > 255 || vfid < 0)
3739 return -EIO;
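/* VF IDs 0..191 map onto the 32-bit words of desc[1]; IDs 192..255
* continue into desc[2]. */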
3741 if (vfid >= 0 && vfid <= 191) {
3742 word_num = vfid / 32;
3743 bit_num = vfid % 32;
3744 if (clr)
3745 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3746 else
3747 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
3748 } else {
3749 word_num = (vfid - 192) / 32;
3750 bit_num = vfid % 32;
3751 if (clr)
3752 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3753 else
3754 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
3757 return 0;
3760 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3762 #define HCLGE_DESC_NUMBER 3
3763 #define HCLGE_FUNC_NUMBER_PER_DESC 6
3764 int i, j;
3766 for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3767 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3768 if (desc[i].data[j])
3769 return false;
3771 return true;
3774 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3775 const u8 *addr)
3777 const unsigned char *mac_addr = addr;
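/* Pack MAC bytes 0-3 into the low 32 bits (byte 0 least significant)
* and bytes 4-5 into the 16-bit word. */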
3778 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
3779 (mac_addr[0]) | (mac_addr[1] << 8);
3780 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
3782 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3783 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3786 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3787 const u8 *addr)
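/* Derive a 12-bit MTA table index from the top bits of the destination
* MAC; mta_mac_sel_type selects which 12-bit window is used. */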
3789 u16 high_val = addr[1] | (addr[0] << 8);
3790 struct hclge_dev *hdev = vport->back;
3791 u32 rsh = 4 - hdev->mta_mac_sel_type;
3792 u16 ret_val = (high_val >> rsh) & 0xfff;
3794 return ret_val;
3797 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3798 enum hclge_mta_dmac_sel_type mta_mac_sel,
3799 bool enable)
3801 struct hclge_mta_filter_mode_cmd *req;
3802 struct hclge_desc desc;
3803 int ret;
3805 req = (struct hclge_mta_filter_mode_cmd *)desc.data;
3806 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3808 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3809 enable);
3810 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3811 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3813 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3814 if (ret) {
3815 dev_err(&hdev->pdev->dev,
3816 "Config mat filter mode failed for cmd_send, ret =%d.\n",
3817 ret);
3818 return ret;
3821 return 0;
3824 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3825 u8 func_id,
3826 bool enable)
3828 struct hclge_cfg_func_mta_filter_cmd *req;
3829 struct hclge_desc desc;
3830 int ret;
3832 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
3833 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3835 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3836 enable);
3837 req->function_id = func_id;
3839 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3840 if (ret) {
3841 dev_err(&hdev->pdev->dev,
3842 "Config func_id enable failed for cmd_send, ret =%d.\n",
3843 ret);
3844 return ret;
3847 return 0;
3850 static int hclge_set_mta_table_item(struct hclge_vport *vport,
3851 u16 idx,
3852 bool enable)
3854 struct hclge_dev *hdev = vport->back;
3855 struct hclge_cfg_func_mta_item_cmd *req;
3856 struct hclge_desc desc;
3857 u16 item_idx = 0;
3858 int ret;
3860 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
3861 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3862 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3864 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
3865 HCLGE_CFG_MTA_ITEM_IDX_S, idx);
3866 req->item_idx = cpu_to_le16(item_idx);
3868 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3869 if (ret) {
3870 dev_err(&hdev->pdev->dev,
3871 "Config mta table item failed for cmd_send, ret =%d.\n",
3872 ret);
3873 return ret;
3876 return 0;
3879 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
3880 struct hclge_mac_vlan_tbl_entry_cmd *req)
3882 struct hclge_dev *hdev = vport->back;
3883 struct hclge_desc desc;
3884 u8 resp_code;
3885 u16 retval;
3886 int ret;
3888 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3890 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3892 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3893 if (ret) {
3894 dev_err(&hdev->pdev->dev,
3895 "del mac addr failed for cmd_send, ret =%d.\n",
3896 ret);
3897 return ret;
3899 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3900 retval = le16_to_cpu(desc.retval);
3902 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
3903 HCLGE_MAC_VLAN_REMOVE);
3906 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
3907 struct hclge_mac_vlan_tbl_entry_cmd *req,
3908 struct hclge_desc *desc,
3909 bool is_mc)
3911 struct hclge_dev *hdev = vport->back;
3912 u8 resp_code;
3913 u16 retval;
3914 int ret;
3916 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
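/* A multicast entry carries a VF bitmap and spans three chained
* descriptors; a unicast lookup fits in a single descriptor. */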
3917 if (is_mc) {
3918 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3919 memcpy(desc[0].data,
3920 req,
3921 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3922 hclge_cmd_setup_basic_desc(&desc[1],
3923 HCLGE_OPC_MAC_VLAN_ADD,
3924 true);
3925 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3926 hclge_cmd_setup_basic_desc(&desc[2],
3927 HCLGE_OPC_MAC_VLAN_ADD,
3928 true);
3929 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3930 } else {
3931 memcpy(desc[0].data,
3932 req,
3933 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3934 ret = hclge_cmd_send(&hdev->hw, desc, 1);
3936 if (ret) {
3937 dev_err(&hdev->pdev->dev,
3938 "lookup mac addr failed for cmd_send, ret =%d.\n",
3939 ret);
3940 return ret;
3942 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
3943 retval = le16_to_cpu(desc[0].retval);
3945 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
3946 HCLGE_MAC_VLAN_LKUP);
3949 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
3950 struct hclge_mac_vlan_tbl_entry_cmd *req,
3951 struct hclge_desc *mc_desc)
3953 struct hclge_dev *hdev = vport->back;
3954 int cfg_status;
3955 u8 resp_code;
3956 u16 retval;
3957 int ret;
3959 if (!mc_desc) {
3960 struct hclge_desc desc;
3962 hclge_cmd_setup_basic_desc(&desc,
3963 HCLGE_OPC_MAC_VLAN_ADD,
3964 false);
3965 memcpy(desc.data, req,
3966 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3967 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3968 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3969 retval = le16_to_cpu(desc.retval);
3971 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
3972 resp_code,
3973 HCLGE_MAC_VLAN_ADD);
3974 } else {
3975 hclge_cmd_reuse_desc(&mc_desc[0], false);
3976 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3977 hclge_cmd_reuse_desc(&mc_desc[1], false);
3978 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3979 hclge_cmd_reuse_desc(&mc_desc[2], false);
3980 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
3981 memcpy(mc_desc[0].data, req,
3982 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3983 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
3984 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
3985 retval = le16_to_cpu(mc_desc[0].retval);
3987 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
3988 resp_code,
3989 HCLGE_MAC_VLAN_ADD);
3992 if (ret) {
3993 dev_err(&hdev->pdev->dev,
3994 "add mac addr failed for cmd_send, ret =%d.\n",
3995 ret);
3996 return ret;
3999 return cfg_status;
4002 static int hclge_add_uc_addr(struct hnae3_handle *handle,
4003 const unsigned char *addr)
4005 struct hclge_vport *vport = hclge_get_vport(handle);
4007 return hclge_add_uc_addr_common(vport, addr);
4010 int hclge_add_uc_addr_common(struct hclge_vport *vport,
4011 const unsigned char *addr)
4013 struct hclge_dev *hdev = vport->back;
4014 struct hclge_mac_vlan_tbl_entry_cmd req;
4015 enum hclge_cmd_status status;
4016 u16 egress_port = 0;
4018 /* mac addr check */
4019 if (is_zero_ether_addr(addr) ||
4020 is_broadcast_ether_addr(addr) ||
4021 is_multicast_ether_addr(addr)) {
4022 dev_err(&hdev->pdev->dev,
4023 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
4024 addr,
4025 is_zero_ether_addr(addr),
4026 is_broadcast_ether_addr(addr),
4027 is_multicast_ether_addr(addr));
4028 return -EINVAL;
4031 memset(&req, 0, sizeof(req));
4032 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4033 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4034 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
4035 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4037 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
4038 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
4039 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
4040 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
4041 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
4042 HCLGE_MAC_EPORT_PFID_S, 0);
4044 req.egress_port = cpu_to_le16(egress_port);
4046 hclge_prepare_mac_addr(&req, addr);
4048 status = hclge_add_mac_vlan_tbl(vport, &req, NULL);
4050 return status;
4051 }
4053 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
4054 const unsigned char *addr)
4055 {
4056 struct hclge_vport *vport = hclge_get_vport(handle);
4058 return hclge_rm_uc_addr_common(vport, addr);
4059 }
4061 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
4062 const unsigned char *addr)
4063 {
4064 struct hclge_dev *hdev = vport->back;
4065 struct hclge_mac_vlan_tbl_entry_cmd req;
4066 enum hclge_cmd_status status;
4068 /* mac addr check */
4069 if (is_zero_ether_addr(addr) ||
4070 is_broadcast_ether_addr(addr) ||
4071 is_multicast_ether_addr(addr)) {
4072 dev_dbg(&hdev->pdev->dev,
4073 "Remove mac err! invalid mac:%pM.\n",
4074 addr);
4075 return -EINVAL;
4076 }
4078 memset(&req, 0, sizeof(req));
4079 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4080 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4081 hclge_prepare_mac_addr(&req, addr);
4082 status = hclge_remove_mac_vlan_tbl(vport, &req);
4084 return status;
4085 }
4087 static int hclge_add_mc_addr(struct hnae3_handle *handle,
4088 const unsigned char *addr)
4089 {
4090 struct hclge_vport *vport = hclge_get_vport(handle);
4092 return hclge_add_mc_addr_common(vport, addr);
4093 }
4095 int hclge_add_mc_addr_common(struct hclge_vport *vport,
4096 const unsigned char *addr)
4097 {
4098 struct hclge_dev *hdev = vport->back;
4099 struct hclge_mac_vlan_tbl_entry_cmd req;
4100 struct hclge_desc desc[3];
4101 u16 tbl_idx;
4102 int status;
4104 /* mac addr check */
4105 if (!is_multicast_ether_addr(addr)) {
4106 dev_err(&hdev->pdev->dev,
4107 "Add mc mac err! invalid mac:%pM.\n",
4108 addr);
4109 return -EINVAL;
4110 }
4111 memset(&req, 0, sizeof(req));
4112 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4113 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4114 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4115 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4116 hclge_prepare_mac_addr(&req, addr);
4117 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4118 if (!status) {
4119 /* This mac addr exists; update the VFID for it */
4120 hclge_update_desc_vfid(desc, vport->vport_id, false);
4121 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4122 } else {
4123 /* This mac addr does not exist; add a new entry for it */
4124 memset(desc[0].data, 0, sizeof(desc[0].data));
4125 memset(desc[1].data, 0, sizeof(desc[1].data));
4126 memset(desc[2].data, 0, sizeof(desc[2].data));
4127 hclge_update_desc_vfid(desc, vport->vport_id, false);
4128 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4129 }
4131 /* Set MTA table for this MAC address */
4132 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
4133 status = hclge_set_mta_table_item(vport, tbl_idx, true);
4135 return status;
4136 }
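/*
 * Editor's note: multicast entries are effectively reference-counted by
 * their VF bitmap. Removal below clears only this vport's VFID bit; the
 * hardware entry is deleted once hclge_is_all_function_id_zero() reports
 * that no function references it any more.
 */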
4138 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
4139 const unsigned char *addr)
4140 {
4141 struct hclge_vport *vport = hclge_get_vport(handle);
4143 return hclge_rm_mc_addr_common(vport, addr);
4144 }
4146 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
4147 const unsigned char *addr)
4148 {
4149 struct hclge_dev *hdev = vport->back;
4150 struct hclge_mac_vlan_tbl_entry_cmd req;
4151 enum hclge_cmd_status status;
4152 struct hclge_desc desc[3];
4153 u16 tbl_idx;
4155 /* mac addr check */
4156 if (!is_multicast_ether_addr(addr)) {
4157 dev_dbg(&hdev->pdev->dev,
4158 "Remove mc mac err! invalid mac:%pM.\n",
4159 addr);
4160 return -EINVAL;
4161 }
4163 memset(&req, 0, sizeof(req));
4164 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4165 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4166 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4167 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4168 hclge_prepare_mac_addr(&req, addr);
4169 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4170 if (!status) {
4171 /* This mac addr exists; remove this handle's VFID for it */
4172 hclge_update_desc_vfid(desc, vport->vport_id, true);
4174 if (hclge_is_all_function_id_zero(desc))
4175 /* All the vfids are zero, so delete this entry */
4176 status = hclge_remove_mac_vlan_tbl(vport, &req);
4177 else
4178 /* Not all the vfids are zero; just update the vfids */
4179 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4181 } else {
4182 /* This mac addr does not exist, so it can't be deleted */
4183 dev_err(&hdev->pdev->dev,
4184 "Rm multicast mac addr failed, ret = %d.\n",
4185 status);
4186 return -EIO;
4187 }
4189 /* Set MTA table for this MAC address */
4190 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
4191 status = hclge_set_mta_table_item(vport, tbl_idx, false);
4193 return status;
4194 }
4196 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
4198 struct hclge_vport *vport = hclge_get_vport(handle);
4199 struct hclge_dev *hdev = vport->back;
4201 ether_addr_copy(p, hdev->hw.mac.mac_addr);
4204 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
4206 const unsigned char *new_addr = (const unsigned char *)p;
4207 struct hclge_vport *vport = hclge_get_vport(handle);
4208 struct hclge_dev *hdev = vport->back;
4210 /* mac addr check */
4211 if (is_zero_ether_addr(new_addr) ||
4212 is_broadcast_ether_addr(new_addr) ||
4213 is_multicast_ether_addr(new_addr)) {
4214 dev_err(&hdev->pdev->dev,
4215 "Change uc mac err! invalid mac:%pM.\n",
4216 new_addr);
4217 return -EINVAL;
4218 }
4220 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
4222 if (!hclge_add_uc_addr(handle, new_addr)) {
4223 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
4224 return 0;
4227 return -EIO;
4228 }
4230 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
4231 bool filter_en)
4232 {
4233 struct hclge_vlan_filter_ctrl_cmd *req;
4234 struct hclge_desc desc;
4235 int ret;
4237 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
4239 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
4240 req->vlan_type = vlan_type;
4241 req->vlan_fe = filter_en;
4243 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4244 if (ret) {
4245 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
4246 ret);
4247 return ret;
4250 return 0;
4251 }
4253 #define HCLGE_FILTER_TYPE_VF 0
4254 #define HCLGE_FILTER_TYPE_PORT 1
4256 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
4257 {
4258 struct hclge_vport *vport = hclge_get_vport(handle);
4259 struct hclge_dev *hdev = vport->back;
4261 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
4262 }
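/*
 * Editor's note: hclge_set_vf_vlan_common() below marks one VF in a
 * per-VLAN bitmap that spans two descriptors, HCLGE_MAX_VF_BYTES (16)
 * bytes each: vfid / 8 picks the byte and 1 << (vfid % 8) the bit, e.g.
 * vfid 10 sets bit 0x04 of byte 1 in desc[0].
 */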
4264 int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4265 bool is_kill, u16 vlan, u8 qos, __be16 proto)
4267 #define HCLGE_MAX_VF_BYTES 16
4268 struct hclge_vlan_filter_vf_cfg_cmd *req0;
4269 struct hclge_vlan_filter_vf_cfg_cmd *req1;
4270 struct hclge_desc desc[2];
4271 u8 vf_byte_val;
4272 u8 vf_byte_off;
4273 int ret;
4275 hclge_cmd_setup_basic_desc(&desc[0],
4276 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4277 hclge_cmd_setup_basic_desc(&desc[1],
4278 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4280 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4282 vf_byte_off = vfid / 8;
4283 vf_byte_val = 1 << (vfid % 8);
4285 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
4286 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
4288 req0->vlan_id = cpu_to_le16(vlan);
4289 req0->vlan_cfg = is_kill;
4291 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
4292 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
4293 else
4294 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
4296 ret = hclge_cmd_send(&hdev->hw, desc, 2);
4297 if (ret) {
4298 dev_err(&hdev->pdev->dev,
4299 "Send vf vlan command fail, ret =%d.\n",
4300 ret);
4301 return ret;
4304 if (!is_kill) {
4305 if (!req0->resp_code || req0->resp_code == 1)
4306 return 0;
4308 dev_err(&hdev->pdev->dev,
4309 "Add vf vlan filter fail, ret =%d.\n",
4310 req0->resp_code);
4311 } else {
4312 if (!req0->resp_code)
4313 return 0;
4315 dev_err(&hdev->pdev->dev,
4316 "Kill vf vlan filter fail, ret =%d.\n",
4317 req0->resp_code);
4318 }
4320 return -EIO;
4321 }
4323 static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
4324 __be16 proto, u16 vlan_id,
4325 bool is_kill)
4326 {
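/*
 * Editor's note: the PF VLAN filter command addresses VLAN ids in windows
 * of 160: vlan_offset_160 selects the window, (vlan_id % 160) / 8 the
 * byte within it, and 1 << (vlan_id % 8) the bit. E.g. vlan_id 200 maps
 * to window 1, byte 5, bit 0x01.
 */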
4327 struct hclge_vport *vport = hclge_get_vport(handle);
4328 struct hclge_dev *hdev = vport->back;
4329 struct hclge_vlan_filter_pf_cfg_cmd *req;
4330 struct hclge_desc desc;
4331 u8 vlan_offset_byte_val;
4332 u8 vlan_offset_byte;
4333 u8 vlan_offset_160;
4334 int ret;
4336 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
4338 vlan_offset_160 = vlan_id / 160;
4339 vlan_offset_byte = (vlan_id % 160) / 8;
4340 vlan_offset_byte_val = 1 << (vlan_id % 8);
4342 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
4343 req->vlan_offset = vlan_offset_160;
4344 req->vlan_cfg = is_kill;
4345 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
4347 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4348 if (ret) {
4349 dev_err(&hdev->pdev->dev,
4350 "port vlan command, send fail, ret =%d.\n",
4351 ret);
4352 return ret;
4355 ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
4356 if (ret) {
4357 dev_err(&hdev->pdev->dev,
4358 "Set pf vlan filter config fail, ret =%d.\n",
4359 ret);
4360 return -EIO;
4363 return 0;
4366 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
4367 u16 vlan, u8 qos, __be16 proto)
4369 struct hclge_vport *vport = hclge_get_vport(handle);
4370 struct hclge_dev *hdev = vport->back;
4372 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
4373 return -EINVAL;
4374 if (proto != htons(ETH_P_8021Q))
4375 return -EPROTONOSUPPORT;
4377 return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
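/*
 * Editor's note: the two offload-config helpers below address the target
 * vport the same way: vf_offset = vport_id / HCLGE_VF_NUM_PER_CMD selects
 * a chunk of the vf_bitmap, and 1 << (vport_id % HCLGE_VF_NUM_PER_BYTE)
 * sets that vport's bit within it.
 */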
4380 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
4382 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
4383 struct hclge_vport_vtag_tx_cfg_cmd *req;
4384 struct hclge_dev *hdev = vport->back;
4385 struct hclge_desc desc;
4386 int status;
4388 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
4390 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
4391 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
4392 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
4393 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
4394 vcfg->accept_tag ? 1 : 0);
4395 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
4396 vcfg->accept_untag ? 1 : 0);
4397 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
4398 vcfg->insert_tag1_en ? 1 : 0);
4399 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
4400 vcfg->insert_tag2_en ? 1 : 0);
4401 hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
4403 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
4404 req->vf_bitmap[req->vf_offset] =
4405 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
4407 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4408 if (status)
4409 dev_err(&hdev->pdev->dev,
4410 "Send port txvlan cfg command fail, ret =%d\n",
4411 status);
4413 return status;
4416 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
4418 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
4419 struct hclge_vport_vtag_rx_cfg_cmd *req;
4420 struct hclge_dev *hdev = vport->back;
4421 struct hclge_desc desc;
4422 int status;
4424 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
4426 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
4427 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
4428 vcfg->strip_tag1_en ? 1 : 0);
4429 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
4430 vcfg->strip_tag2_en ? 1 : 0);
4431 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
4432 vcfg->vlan1_vlan_prionly ? 1 : 0);
4433 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
4434 vcfg->vlan2_vlan_prionly ? 1 : 0);
4436 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
4437 req->vf_bitmap[req->vf_offset] =
4438 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
4440 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4441 if (status)
4442 dev_err(&hdev->pdev->dev,
4443 "Send port rxvlan cfg command fail, ret =%d\n",
4444 status);
4446 return status;
4449 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
4451 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
4452 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
4453 struct hclge_desc desc;
4454 int status;
4456 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
4457 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
4458 rx_req->ot_fst_vlan_type =
4459 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
4460 rx_req->ot_sec_vlan_type =
4461 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
4462 rx_req->in_fst_vlan_type =
4463 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
4464 rx_req->in_sec_vlan_type =
4465 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
4467 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4468 if (status) {
4469 dev_err(&hdev->pdev->dev,
4470 "Send rxvlan protocol type command fail, ret =%d\n",
4471 status);
4472 return status;
4475 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
4477 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
4478 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
4479 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
4481 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4482 if (status)
4483 dev_err(&hdev->pdev->dev,
4484 "Send txvlan protocol type command fail, ret =%d\n",
4485 status);
4487 return status;
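/*
 * Editor's note: hclge_init_vlan_config() below turns on both VF- and
 * port-level VLAN filtering, programs every VLAN TPID to 0x8100, and sets
 * per-vport offload defaults: accept tagged and untagged frames on TX, no
 * tag insertion, and strip only tag 2 on RX.
 */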
4490 static int hclge_init_vlan_config(struct hclge_dev *hdev)
4492 #define HCLGE_DEF_VLAN_TYPE 0x8100
4494 struct hnae3_handle *handle;
4495 struct hclge_vport *vport;
4496 int ret;
4497 int i;
4499 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
4500 if (ret)
4501 return ret;
4503 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
4504 if (ret)
4505 return ret;
4507 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
4508 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
4509 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
4510 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
4511 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
4512 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
4514 ret = hclge_set_vlan_protocol_type(hdev);
4515 if (ret)
4516 return ret;
4518 for (i = 0; i < hdev->num_alloc_vport; i++) {
4519 vport = &hdev->vport[i];
4520 vport->txvlan_cfg.accept_tag = true;
4521 vport->txvlan_cfg.accept_untag = true;
4522 vport->txvlan_cfg.insert_tag1_en = false;
4523 vport->txvlan_cfg.insert_tag2_en = false;
4524 vport->txvlan_cfg.default_tag1 = 0;
4525 vport->txvlan_cfg.default_tag2 = 0;
4527 ret = hclge_set_vlan_tx_offload_cfg(vport);
4528 if (ret)
4529 return ret;
4531 vport->rxvlan_cfg.strip_tag1_en = false;
4532 vport->rxvlan_cfg.strip_tag2_en = true;
4533 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
4534 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
4536 ret = hclge_set_vlan_rx_offload_cfg(vport);
4537 if (ret)
4538 return ret;
4541 handle = &hdev->vport[0].nic;
4542 return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
4545 static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
4547 struct hclge_vport *vport = hclge_get_vport(handle);
4549 vport->rxvlan_cfg.strip_tag1_en = false;
4550 vport->rxvlan_cfg.strip_tag2_en = enable;
4551 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
4552 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
4554 return hclge_set_vlan_rx_offload_cfg(vport);
4557 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
4559 struct hclge_vport *vport = hclge_get_vport(handle);
4560 struct hclge_config_max_frm_size_cmd *req;
4561 struct hclge_dev *hdev = vport->back;
4562 struct hclge_desc desc;
4563 int ret;
4565 if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
4566 return -EINVAL;
4568 hdev->mps = new_mtu;
4569 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
4571 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
4572 req->max_frm_size = cpu_to_le16(new_mtu);
4574 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4575 if (ret) {
4576 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
4577 return ret;
4580 return 0;
4583 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
4584 bool enable)
4586 struct hclge_reset_tqp_queue_cmd *req;
4587 struct hclge_desc desc;
4588 int ret;
4590 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
4592 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
4593 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4594 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
4596 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4597 if (ret) {
4598 dev_err(&hdev->pdev->dev,
4599 "Send tqp reset cmd error, status =%d\n", ret);
4600 return ret;
4603 return 0;
4606 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
4608 struct hclge_reset_tqp_queue_cmd *req;
4609 struct hclge_desc desc;
4610 int ret;
4612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
4614 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
4615 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4617 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4618 if (ret) {
4619 dev_err(&hdev->pdev->dev,
4620 "Get reset status error, status =%d\n", ret);
4621 return ret;
4624 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
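/*
 * Editor's note: hclge_reset_tqp() below disables the queue, asserts the
 * per-queue soft reset, then polls the ready bit up to
 * HCLGE_TQP_RESET_TRY_TIMES times at 20 ms intervals before de-asserting
 * the reset again.
 */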
4627 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
4629 struct hclge_vport *vport = hclge_get_vport(handle);
4630 struct hclge_dev *hdev = vport->back;
4631 int reset_try_times = 0;
4632 int reset_status;
4633 int ret;
4635 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
4636 if (ret) {
4637 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
4638 return;
4641 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
4642 if (ret) {
4643 dev_warn(&hdev->pdev->dev,
4644 "Send reset tqp cmd fail, ret = %d\n", ret);
4645 return;
4648 reset_try_times = 0;
4649 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
4650 /* Wait for tqp hw reset */
4651 msleep(20);
4652 reset_status = hclge_get_reset_status(hdev, queue_id);
4653 if (reset_status)
4654 break;
4655 }
4657 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
4658 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
4659 return;
4662 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
4663 if (ret) {
4664 dev_warn(&hdev->pdev->dev,
4665 "Deassert the soft reset fail, ret = %d\n", ret);
4666 return;
4667 }
4668 }
4670 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
4672 struct hclge_vport *vport = hclge_get_vport(handle);
4673 struct hclge_dev *hdev = vport->back;
4675 return hdev->fw_version;
4678 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
4679 u32 *flowctrl_adv)
4681 struct hclge_vport *vport = hclge_get_vport(handle);
4682 struct hclge_dev *hdev = vport->back;
4683 struct phy_device *phydev = hdev->hw.mac.phydev;
4685 if (!phydev)
4686 return;
4688 *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
4689 (phydev->advertising & ADVERTISED_Asym_Pause);
4692 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
4694 struct phy_device *phydev = hdev->hw.mac.phydev;
4696 if (!phydev)
4697 return;
4699 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
4701 if (rx_en)
4702 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
4704 if (tx_en)
4705 phydev->advertising ^= ADVERTISED_Asym_Pause;
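/*
 * Editor's note: this follows the IEEE 802.3 pause advertisement
 * encoding: rx_en sets Pause | Asym_Pause, and the XOR on tx_en then
 * yields Pause alone for rx+tx, Pause | Asym_Pause for rx-only, and
 * Asym_Pause alone for tx-only.
 */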
4708 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
4710 enum hclge_fc_mode fc_mode;
4711 int ret;
4713 if (rx_en && tx_en)
4714 fc_mode = HCLGE_FC_FULL;
4715 else if (rx_en && !tx_en)
4716 fc_mode = HCLGE_FC_RX_PAUSE;
4717 else if (!rx_en && tx_en)
4718 fc_mode = HCLGE_FC_TX_PAUSE;
4719 else
4720 fc_mode = HCLGE_FC_NONE;
4722 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
4723 hdev->fc_mode_last_time = fc_mode;
4724 return 0;
4727 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
4728 if (ret) {
4729 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
4730 ret);
4731 return ret;
4734 hdev->tm_info.fc_mode = fc_mode;
4736 return 0;
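/*
 * Editor's note: hclge_cfg_flowctrl() below re-resolves pause mode after
 * autoneg completes: it rebuilds the local and link-partner advertisement
 * words, lets mii_resolve_flowctrl_fdx() pick the TX/RX pause pair, and
 * forces both off for half duplex before applying the result.
 */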
4739 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
4741 struct phy_device *phydev = hdev->hw.mac.phydev;
4742 u16 remote_advertising = 0;
4743 u16 local_advertising = 0;
4744 u32 rx_pause, tx_pause;
4745 u8 flowctl;
4747 if (!phydev->link || !phydev->autoneg)
4748 return 0;
4750 if (phydev->advertising & ADVERTISED_Pause)
4751 local_advertising = ADVERTISE_PAUSE_CAP;
4753 if (phydev->advertising & ADVERTISED_Asym_Pause)
4754 local_advertising |= ADVERTISE_PAUSE_ASYM;
4756 if (phydev->pause)
4757 remote_advertising = LPA_PAUSE_CAP;
4759 if (phydev->asym_pause)
4760 remote_advertising |= LPA_PAUSE_ASYM;
4762 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
4763 remote_advertising);
4764 tx_pause = flowctl & FLOW_CTRL_TX;
4765 rx_pause = flowctl & FLOW_CTRL_RX;
4767 if (phydev->duplex == HCLGE_MAC_HALF) {
4768 tx_pause = 0;
4769 rx_pause = 0;
4772 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
4775 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
4776 u32 *rx_en, u32 *tx_en)
4778 struct hclge_vport *vport = hclge_get_vport(handle);
4779 struct hclge_dev *hdev = vport->back;
4781 *auto_neg = hclge_get_autoneg(handle);
4783 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
4784 *rx_en = 0;
4785 *tx_en = 0;
4786 return;
4789 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
4790 *rx_en = 1;
4791 *tx_en = 0;
4792 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
4793 *tx_en = 1;
4794 *rx_en = 0;
4795 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
4796 *rx_en = 1;
4797 *tx_en = 1;
4798 } else {
4799 *rx_en = 0;
4800 *tx_en = 0;
4804 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
4805 u32 rx_en, u32 tx_en)
4807 struct hclge_vport *vport = hclge_get_vport(handle);
4808 struct hclge_dev *hdev = vport->back;
4809 struct phy_device *phydev = hdev->hw.mac.phydev;
4810 u32 fc_autoneg;
4812 /* Only support flow control negotiation for netdev with
4813 * phy attached for now.
4814 */
4815 if (!phydev)
4816 return -EOPNOTSUPP;
4818 fc_autoneg = hclge_get_autoneg(handle);
4819 if (auto_neg != fc_autoneg) {
4820 dev_info(&hdev->pdev->dev,
4821 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
4822 return -EOPNOTSUPP;
4825 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
4826 dev_info(&hdev->pdev->dev,
4827 "Priority flow control enabled. Cannot set link flow control.\n");
4828 return -EOPNOTSUPP;
4831 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
4833 if (!fc_autoneg)
4834 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
4836 return phy_start_aneg(phydev);
4839 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
4840 u8 *auto_neg, u32 *speed, u8 *duplex)
4842 struct hclge_vport *vport = hclge_get_vport(handle);
4843 struct hclge_dev *hdev = vport->back;
4845 if (speed)
4846 *speed = hdev->hw.mac.speed;
4847 if (duplex)
4848 *duplex = hdev->hw.mac.duplex;
4849 if (auto_neg)
4850 *auto_neg = hdev->hw.mac.autoneg;
4853 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
4855 struct hclge_vport *vport = hclge_get_vport(handle);
4856 struct hclge_dev *hdev = vport->back;
4858 if (media_type)
4859 *media_type = hdev->hw.mac.media_type;
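/*
 * Editor's note: the MDI-X status lives on a separate PHY register page,
 * so hclge_get_mdix_mode() below selects that page via HCLGE_PHY_PAGE_REG,
 * reads the control and status registers, and restores the copper page
 * before decoding the result.
 */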
4862 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
4863 u8 *tp_mdix_ctrl, u8 *tp_mdix)
4865 struct hclge_vport *vport = hclge_get_vport(handle);
4866 struct hclge_dev *hdev = vport->back;
4867 struct phy_device *phydev = hdev->hw.mac.phydev;
4868 int mdix_ctrl, mdix, retval, is_resolved;
4870 if (!phydev) {
4871 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4872 *tp_mdix = ETH_TP_MDI_INVALID;
4873 return;
4876 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
4878 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
4879 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
4880 HCLGE_PHY_MDIX_CTRL_S);
4882 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
4883 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
4884 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
4886 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
4888 switch (mdix_ctrl) {
4889 case 0x0:
4890 *tp_mdix_ctrl = ETH_TP_MDI;
4891 break;
4892 case 0x1:
4893 *tp_mdix_ctrl = ETH_TP_MDI_X;
4894 break;
4895 case 0x3:
4896 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
4897 break;
4898 default:
4899 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4900 break;
4903 if (!is_resolved)
4904 *tp_mdix = ETH_TP_MDI_INVALID;
4905 else if (mdix)
4906 *tp_mdix = ETH_TP_MDI_X;
4907 else
4908 *tp_mdix = ETH_TP_MDI;
4911 static int hclge_init_client_instance(struct hnae3_client *client,
4912 struct hnae3_ae_dev *ae_dev)
4913 {
4914 struct hclge_dev *hdev = ae_dev->priv;
4915 struct hclge_vport *vport;
4916 int i, ret;
4918 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4919 vport = &hdev->vport[i];
4921 switch (client->type) {
4922 case HNAE3_CLIENT_KNIC:
4924 hdev->nic_client = client;
4925 vport->nic.client = client;
4926 ret = client->ops->init_instance(&vport->nic);
4927 if (ret)
4928 goto err;
4930 if (hdev->roce_client &&
4931 hnae3_dev_roce_supported(hdev)) {
4932 struct hnae3_client *rc = hdev->roce_client;
4934 ret = hclge_init_roce_base_info(vport);
4935 if (ret)
4936 goto err;
4938 ret = rc->ops->init_instance(&vport->roce);
4939 if (ret)
4940 goto err;
4941 }
4943 break;
4944 case HNAE3_CLIENT_UNIC:
4945 hdev->nic_client = client;
4946 vport->nic.client = client;
4948 ret = client->ops->init_instance(&vport->nic);
4949 if (ret)
4950 goto err;
4952 break;
4953 case HNAE3_CLIENT_ROCE:
4954 if (hnae3_dev_roce_supported(hdev)) {
4955 hdev->roce_client = client;
4956 vport->roce.client = client;
4957 }
4959 if (hdev->roce_client && hdev->nic_client) {
4960 ret = hclge_init_roce_base_info(vport);
4961 if (ret)
4962 goto err;
4964 ret = client->ops->init_instance(&vport->roce);
4965 if (ret)
4966 goto err;
4967 }
4968 }
4969 }
4971 return 0;
4972 err:
4973 return ret;
4974 }
4976 static void hclge_uninit_client_instance(struct hnae3_client *client,
4977 struct hnae3_ae_dev *ae_dev)
4978 {
4979 struct hclge_dev *hdev = ae_dev->priv;
4980 struct hclge_vport *vport;
4981 int i;
4983 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4984 vport = &hdev->vport[i];
4985 if (hdev->roce_client) {
4986 hdev->roce_client->ops->uninit_instance(&vport->roce,
4987 0);
4988 hdev->roce_client = NULL;
4989 vport->roce.client = NULL;
4990 }
4991 if (client->type == HNAE3_CLIENT_ROCE)
4992 return;
4993 if (client->ops->uninit_instance) {
4994 client->ops->uninit_instance(&vport->nic, 0);
4995 hdev->nic_client = NULL;
4996 vport->nic.client = NULL;
4997 }
4998 }
4999 }
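/*
 * Editor's note: hclge_pci_init() below prefers a 64-bit DMA mask and
 * falls back to 32 bits if that is rejected, then maps BAR 2 as the
 * command/configuration register space.
 */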
5001 static int hclge_pci_init(struct hclge_dev *hdev)
5002 {
5003 struct pci_dev *pdev = hdev->pdev;
5004 struct hclge_hw *hw;
5005 int ret;
5007 ret = pci_enable_device(pdev);
5008 if (ret) {
5009 dev_err(&pdev->dev, "failed to enable PCI device\n");
5010 goto err_no_drvdata;
5013 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5014 if (ret) {
5015 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5016 if (ret) {
5017 dev_err(&pdev->dev,
5018 "can't set consistent PCI DMA");
5019 goto err_disable_device;
5021 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
5024 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
5025 if (ret) {
5026 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
5027 goto err_disable_device;
5030 pci_set_master(pdev);
5031 hw = &hdev->hw;
5032 hw->back = hdev;
5033 hw->io_base = pcim_iomap(pdev, 2, 0);
5034 if (!hw->io_base) {
5035 dev_err(&pdev->dev, "Can't map configuration register space\n");
5036 ret = -ENOMEM;
5037 goto err_clr_master;
5040 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
5042 return 0;
5043 err_clr_master:
5044 pci_clear_master(pdev);
5045 pci_release_regions(pdev);
5046 err_disable_device:
5047 pci_disable_device(pdev);
5048 err_no_drvdata:
5049 pci_set_drvdata(pdev, NULL);
5051 return ret;
5052 }
5054 static void hclge_pci_uninit(struct hclge_dev *hdev)
5055 {
5056 struct pci_dev *pdev = hdev->pdev;
5058 pci_free_irq_vectors(pdev);
5059 pci_clear_master(pdev);
5060 pci_release_mem_regions(pdev);
5061 pci_disable_device(pdev);
5064 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
5066 struct pci_dev *pdev = ae_dev->pdev;
5067 struct hclge_dev *hdev;
5068 int ret;
5070 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
5071 if (!hdev) {
5072 ret = -ENOMEM;
5073 goto err_hclge_dev;
5076 hdev->pdev = pdev;
5077 hdev->ae_dev = ae_dev;
5078 hdev->reset_type = HNAE3_NONE_RESET;
5079 hdev->reset_request = 0;
5080 hdev->reset_pending = 0;
5081 ae_dev->priv = hdev;
5083 ret = hclge_pci_init(hdev);
5084 if (ret) {
5085 dev_err(&pdev->dev, "PCI init failed\n");
5086 goto err_pci_init;
5089 /* Initialize the firmware command queue */
5090 ret = hclge_cmd_queue_init(hdev);
5091 if (ret) {
5092 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
5093 return ret;
5096 /* Initialize the firmware command interface */
5097 ret = hclge_cmd_init(hdev);
5098 if (ret)
5099 goto err_cmd_init;
5101 ret = hclge_get_cap(hdev);
5102 if (ret) {
5103 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5104 ret);
5105 return ret;
5108 ret = hclge_configure(hdev);
5109 if (ret) {
5110 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
5111 return ret;
5114 ret = hclge_init_msi(hdev);
5115 if (ret) {
5116 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
5117 return ret;
5120 ret = hclge_misc_irq_init(hdev);
5121 if (ret) {
5122 dev_err(&pdev->dev,
5123 "Misc IRQ(vector0) init error, ret = %d.\n",
5124 ret);
5125 return ret;
5128 ret = hclge_alloc_tqps(hdev);
5129 if (ret) {
5130 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
5131 return ret;
5134 ret = hclge_alloc_vport(hdev);
5135 if (ret) {
5136 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
5137 return ret;
5140 ret = hclge_map_tqp(hdev);
5141 if (ret) {
5142 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
5143 return ret;
5146 ret = hclge_mac_mdio_config(hdev);
5147 if (ret) {
5148 dev_warn(&hdev->pdev->dev,
5149 "mdio config fail ret=%d\n", ret);
5150 return ret;
5153 ret = hclge_mac_init(hdev);
5154 if (ret) {
5155 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
5156 return ret;
5158 ret = hclge_buffer_alloc(hdev);
5159 if (ret) {
5160 dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
5161 return ret;
5164 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
5165 if (ret) {
5166 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
5167 return ret;
5170 ret = hclge_init_vlan_config(hdev);
5171 if (ret) {
5172 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
5173 return ret;
5176 ret = hclge_tm_schd_init(hdev);
5177 if (ret) {
5178 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
5179 return ret;
5182 ret = hclge_rss_init_hw(hdev);
5183 if (ret) {
5184 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
5185 return ret;
5188 hclge_dcb_ops_set(hdev);
5190 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
5191 INIT_WORK(&hdev->service_task, hclge_service_task);
5192 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
5193 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
5195 /* Enable MISC vector(vector0) */
5196 hclge_enable_vector(&hdev->misc_vector, true);
5198 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
5199 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5200 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
5201 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5202 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
5203 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
5205 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
5206 return 0;
5208 err_cmd_init:
5209 pci_release_regions(pdev);
5210 err_pci_init:
5211 pci_set_drvdata(pdev, NULL);
5212 err_hclge_dev:
5213 return ret;
5216 static void hclge_stats_clear(struct hclge_dev *hdev)
5218 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
5221 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
5223 struct hclge_dev *hdev = ae_dev->priv;
5224 struct pci_dev *pdev = ae_dev->pdev;
5225 int ret;
5227 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5229 hclge_stats_clear(hdev);
5231 ret = hclge_cmd_init(hdev);
5232 if (ret) {
5233 dev_err(&pdev->dev, "Cmd queue init failed\n");
5234 return ret;
5237 ret = hclge_get_cap(hdev);
5238 if (ret) {
5239 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5240 ret);
5241 return ret;
5244 ret = hclge_configure(hdev);
5245 if (ret) {
5246 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
5247 return ret;
5250 ret = hclge_map_tqp(hdev);
5251 if (ret) {
5252 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
5253 return ret;
5256 ret = hclge_mac_init(hdev);
5257 if (ret) {
5258 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
5259 return ret;
5262 ret = hclge_buffer_alloc(hdev);
5263 if (ret) {
5264 dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
5265 return ret;
5268 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
5269 if (ret) {
5270 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
5271 return ret;
5274 ret = hclge_init_vlan_config(hdev);
5275 if (ret) {
5276 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
5277 return ret;
5280 ret = hclge_tm_schd_init(hdev);
5281 if (ret) {
5282 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
5283 return ret;
5286 ret = hclge_rss_init_hw(hdev);
5287 if (ret) {
5288 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
5289 return ret;
5292 /* Enable MISC vector(vector0) */
5293 hclge_enable_vector(&hdev->misc_vector, true);
5295 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
5296 HCLGE_DRIVER_NAME);
5298 return 0;
5301 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
5303 struct hclge_dev *hdev = ae_dev->priv;
5304 struct hclge_mac *mac = &hdev->hw.mac;
5306 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5308 if (IS_ENABLED(CONFIG_PCI_IOV))
5309 hclge_disable_sriov(hdev);
5311 if (hdev->service_timer.function)
5312 del_timer_sync(&hdev->service_timer);
5313 if (hdev->service_task.func)
5314 cancel_work_sync(&hdev->service_task);
5315 if (hdev->rst_service_task.func)
5316 cancel_work_sync(&hdev->rst_service_task);
5317 if (hdev->mbx_service_task.func)
5318 cancel_work_sync(&hdev->mbx_service_task);
5320 if (mac->phydev)
5321 mdiobus_unregister(mac->mdio_bus);
5323 /* Disable MISC vector(vector0) */
5324 hclge_enable_vector(&hdev->misc_vector, false);
5325 hclge_destroy_cmd_queue(&hdev->hw);
5326 hclge_misc_irq_uninit(hdev);
5327 hclge_pci_uninit(hdev);
5328 ae_dev->priv = NULL;
5331 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
5333 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5334 struct hclge_vport *vport = hclge_get_vport(handle);
5335 struct hclge_dev *hdev = vport->back;
5337 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
5340 static void hclge_get_channels(struct hnae3_handle *handle,
5341 struct ethtool_channels *ch)
5343 struct hclge_vport *vport = hclge_get_vport(handle);
5345 ch->max_combined = hclge_get_max_channels(handle);
5346 ch->other_count = 1;
5347 ch->max_other = 1;
5348 ch->combined_count = vport->alloc_tqps;
5351 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
5352 u16 *free_tqps, u16 *max_rss_size)
5354 struct hclge_vport *vport = hclge_get_vport(handle);
5355 struct hclge_dev *hdev = vport->back;
5356 u16 temp_tqps = 0;
5357 int i;
5359 for (i = 0; i < hdev->num_tqps; i++) {
5360 if (!hdev->htqp[i].alloced)
5361 temp_tqps++;
5363 *free_tqps = temp_tqps;
5364 *max_rss_size = hdev->rss_size_max;
5367 static void hclge_release_tqp(struct hclge_vport *vport)
5369 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5370 struct hclge_dev *hdev = vport->back;
5371 int i;
5373 for (i = 0; i < kinfo->num_tqps; i++) {
5374 struct hclge_tqp *tqp =
5375 container_of(kinfo->tqp[i], struct hclge_tqp, q);
5377 tqp->q.handle = NULL;
5378 tqp->q.tqp_index = 0;
5379 tqp->alloced = false;
5382 devm_kfree(&hdev->pdev->dev, kinfo->tqp);
5383 kinfo->tqp = NULL;
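/*
 * Editor's note: hclge_set_channels() below releases and re-creates the
 * vport's TQPs for the new queue count, rewrites the RSS TC mode with
 * tc_size = ilog2(roundup_pow_of_two(rss_size)), and refills the
 * indirection table round-robin with i % rss_size.
 */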
5386 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
5388 struct hclge_vport *vport = hclge_get_vport(handle);
5389 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5390 struct hclge_dev *hdev = vport->back;
5391 int cur_rss_size = kinfo->rss_size;
5392 int cur_tqps = kinfo->num_tqps;
5393 u16 tc_offset[HCLGE_MAX_TC_NUM];
5394 u16 tc_valid[HCLGE_MAX_TC_NUM];
5395 u16 tc_size[HCLGE_MAX_TC_NUM];
5396 u16 roundup_size;
5397 u32 *rss_indir;
5398 int ret, i;
5400 hclge_release_tqp(vport);
5402 ret = hclge_knic_setup(vport, new_tqps_num);
5403 if (ret) {
5404 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
5405 return ret;
5408 ret = hclge_map_tqp_to_vport(hdev, vport);
5409 if (ret) {
5410 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
5411 return ret;
5414 ret = hclge_tm_schd_init(hdev);
5415 if (ret) {
5416 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
5417 return ret;
5420 roundup_size = roundup_pow_of_two(kinfo->rss_size);
5421 roundup_size = ilog2(roundup_size);
5422 /* Set the RSS TC mode according to the new RSS size */
5423 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5424 tc_valid[i] = 0;
5426 if (!(hdev->hw_tc_map & BIT(i)))
5427 continue;
5429 tc_valid[i] = 1;
5430 tc_size[i] = roundup_size;
5431 tc_offset[i] = kinfo->rss_size * i;
5433 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5434 if (ret)
5435 return ret;
5437 /* Reinitializes the rss indirect table according to the new RSS size */
5438 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
5439 if (!rss_indir)
5440 return -ENOMEM;
5442 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
5443 rss_indir[i] = i % kinfo->rss_size;
5445 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
5446 if (ret)
5447 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
5448 ret);
5450 kfree(rss_indir);
5452 if (!ret)
5453 dev_info(&hdev->pdev->dev,
5454 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
5455 cur_rss_size, kinfo->rss_size,
5456 cur_tqps, kinfo->rss_size * kinfo->num_tc);
5458 return ret;
5461 static const struct hnae3_ae_ops hclge_ops = {
5462 .init_ae_dev = hclge_init_ae_dev,
5463 .uninit_ae_dev = hclge_uninit_ae_dev,
5464 .init_client_instance = hclge_init_client_instance,
5465 .uninit_client_instance = hclge_uninit_client_instance,
5466 .map_ring_to_vector = hclge_map_ring_to_vector,
5467 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
5468 .get_vector = hclge_get_vector,
5469 .set_promisc_mode = hclge_set_promisc_mode,
5470 .set_loopback = hclge_set_loopback,
5471 .start = hclge_ae_start,
5472 .stop = hclge_ae_stop,
5473 .get_status = hclge_get_status,
5474 .get_ksettings_an_result = hclge_get_ksettings_an_result,
5475 .update_speed_duplex_h = hclge_update_speed_duplex_h,
5476 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
5477 .get_media_type = hclge_get_media_type,
5478 .get_rss_key_size = hclge_get_rss_key_size,
5479 .get_rss_indir_size = hclge_get_rss_indir_size,
5480 .get_rss = hclge_get_rss,
5481 .set_rss = hclge_set_rss,
5482 .set_rss_tuple = hclge_set_rss_tuple,
5483 .get_rss_tuple = hclge_get_rss_tuple,
5484 .get_tc_size = hclge_get_tc_size,
5485 .get_mac_addr = hclge_get_mac_addr,
5486 .set_mac_addr = hclge_set_mac_addr,
5487 .add_uc_addr = hclge_add_uc_addr,
5488 .rm_uc_addr = hclge_rm_uc_addr,
5489 .add_mc_addr = hclge_add_mc_addr,
5490 .rm_mc_addr = hclge_rm_mc_addr,
5491 .set_autoneg = hclge_set_autoneg,
5492 .get_autoneg = hclge_get_autoneg,
5493 .get_pauseparam = hclge_get_pauseparam,
5494 .set_pauseparam = hclge_set_pauseparam,
5495 .set_mtu = hclge_set_mtu,
5496 .reset_queue = hclge_reset_tqp,
5497 .get_stats = hclge_get_stats,
5498 .update_stats = hclge_update_stats,
5499 .get_strings = hclge_get_strings,
5500 .get_sset_count = hclge_get_sset_count,
5501 .get_fw_version = hclge_get_fw_version,
5502 .get_mdix_mode = hclge_get_mdix_mode,
5503 .enable_vlan_filter = hclge_enable_vlan_filter,
5504 .set_vlan_filter = hclge_set_port_vlan_filter,
5505 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
5506 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
5507 .reset_event = hclge_reset_event,
5508 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
5509 .set_channels = hclge_set_channels,
5510 .get_channels = hclge_get_channels,
5511 .get_flowctrl_adv = hclge_get_flowctrl_adv,
5512 };
5514 static struct hnae3_ae_algo ae_algo = {
5515 .ops = &hclge_ops,
5516 .name = HCLGE_NAME,
5517 .pdev_id_table = ae_algo_pci_tbl,
5518 };
5520 static int hclge_init(void)
5521 {
5522 pr_info("%s is initializing\n", HCLGE_NAME);
5524 return hnae3_register_ae_algo(&ae_algo);
5525 }
5527 static void hclge_exit(void)
5528 {
5529 hnae3_unregister_ae_algo(&ae_algo);
5530 }
5531 module_init(hclge_init);
5532 module_exit(hclge_exit);
5534 MODULE_LICENSE("GPL");
5535 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
5536 MODULE_DESCRIPTION("HCLGE Driver");
5537 MODULE_VERSION(HCLGE_MOD_VERSION);