/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_umem.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
			    struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len  = cpu_to_le32(sg->length);
}
static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
			   unsigned int *sge_ind)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct ib_sge *sg;
	int num_in_wqe = 0;
	int extend_sge_num;
	int fi_sge_num;
	int se_sge_num;
	int shift;
	int i;

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
	extend_sge_num = wr->num_sge - num_in_wqe;
	sg = wr->sg_list + num_in_wqe;
	shift = qp->hr_buf.page_shift;

	/*
	 * Check whether wr->num_sge sges are in the same page. If not, we
	 * should calculate how many sges in the first page and the second
	 * page.
	 */
	dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
	fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
		      (uintptr_t)dseg) /
		      sizeof(struct hns_roce_v2_wqe_data_seg);
	if (extend_sge_num > fi_sge_num) {
		se_sge_num = extend_sge_num - fi_sge_num;
		for (i = 0; i < fi_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
		dseg = get_send_extend_sge(qp,
					   (*sge_ind) & (qp->sge.sge_cnt - 1));
		for (i = 0; i < se_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + fi_sge_num + i);
			(*sge_ind)++;
		}
	} else {
		for (i = 0; i < extend_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
	}
}
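/*
 * Note on the page-split arithmetic above: fi_sge_num is how many extended
 * data segments still fit before the next buffer page boundary.  As a worked
 * example, assuming the usual 16-byte data segment and page_shift = 12
 * (4 KB pages), if dseg sits 64 bytes below the boundary then
 * round_up() - dseg = 64 and fi_sge_num = 64 / 16 = 4; any remaining SGEs
 * are written after re-fetching the extended-SGE slot past the boundary.
 */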
static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     void *wqe, unsigned int *sge_ind,
			     struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int i;

	if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
		if (le32_to_cpu(rc_sq_wqe->msg_len) >
		    hr_dev->caps.max_sq_inline) {
			*bad_wr = wr;
			dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
				rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
			return -EINVAL;
		}

		if (wr->opcode == IB_WR_RDMA_READ) {
			dev_err(hr_dev->dev, "Not support inline data!\n");
			*bad_wr = wr;
			return -EINVAL;
		}

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(wqe, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			wqe += wr->sg_list[i].length;
		}

		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
			     1);
	} else {
		if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
			for (i = 0; i < wr->num_sge; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
				}
			}
		} else {
			roce_set_field(rc_sq_wqe->byte_20,
				       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				       (*sge_ind) & (qp->sge.sge_cnt - 1));

			for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
				}
			}

			set_extend_sge(qp, wr, sge_ind);
		}

		roce_set_field(rc_sq_wqe->byte_16,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
	}

	return 0;
}
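/*
 * Summary of the two paths above: IB_SEND_INLINE copies the payload bytes
 * straight into the WQE and sets the INLINE flag, while the non-inline path
 * places up to HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE data segments in the WQE
 * itself and spills the remainder into the extended SGE area via
 * set_extend_sge().
 */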
static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				 struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct device *dev = hr_dev->dev;
	struct hns_roce_v2_db sq_db;
	unsigned int sge_ind = 0;
	unsigned int owner_bit;
	unsigned long flags;
	unsigned int ind;
	void *wqe = NULL;
	bool loopback;
	u32 tmp_len = 0;
	int ret = 0;
	u8 *smac;
	int nreq;
	int i;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
		     qp->state == IB_QPS_RTR)) {
		dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
		*bad_wr = wr;
		return -EINVAL;
	}
	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	sge_ind = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
			wr->wr_id;

		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

		/* Corresponding to the QP type, wqe process separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
				       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
				       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
				       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
				       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

			roce_set_field(ud_sq_wqe->byte_4,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_SEND);

			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ud_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ud_sq_wqe->immtdata = wr->ex.imm_data;
				break;
			default:
				ud_sq_wqe->immtdata = 0;
				break;
			}

			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_CQE_S,
				     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_SE_S,
				     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_PD_M,
				       V2_UD_SEND_WQE_BYTE_16_PD_S,
				       to_hr_pd(ibqp->pd)->pdn);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
				       wr->num_sge);

			roce_set_field(ud_sq_wqe->byte_20,
				       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				       sge_ind & (qp->sge.sge_cnt - 1));

			roce_set_field(ud_sq_wqe->byte_24,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
			ud_sq_wqe->qkey =
			     cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			     qp->qkey : ud_wr(wr)->remote_qkey);
			roce_set_field(ud_sq_wqe->byte_32,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
				       ud_wr(wr)->remote_qpn);

			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
				       le16_to_cpu(ah->av.vlan));
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
				       0);
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
				       0);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_SL_M,
				       V2_UD_SEND_WQE_BYTE_40_SL_S,
				       le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
				       qp->port);

			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
			       GID_LEN_V2);

			set_extend_sge(qp, wr, &sge_ind);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			rc_sq_wqe = wqe;
			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			rc_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				rc_sq_wqe->immtdata = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				rc_sq_wqe->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				rc_sq_wqe->immtdata = 0;
				break;
			}

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
				     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_SE_S,
				     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_CQE_S,
				     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_RDMA_READ);
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE_WITH_IMM:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_SEND:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_SEND);
				break;
			case IB_WR_SEND_WITH_INV:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
				break;
			case IB_WR_SEND_WITH_IMM:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
				break;
			case IB_WR_LOCAL_INV:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_LOCAL_INV);
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
				break;
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
				break;
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
				break;
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
				break;
			default:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_MASK);
				break;
			}

			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
			dseg = wqe;

			ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
						&sge_ind, bad_wr);
			if (ret)
				goto out;
			ind++;
		} else {
			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
			spin_unlock_irqrestore(&qp->sq.lock, flags);
			*bad_wr = wr;
			return -EOPNOTSUPP;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		sq_db.byte_4 = 0;
		sq_db.parameter = 0;

		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
			       V2_DB_PARAMETER_IDX_S,
			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
			       V2_DB_PARAMETER_SL_S, qp->sl);

		hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);

		qp->sq_next_wqe = ind;
		qp->next_sge = sge_ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
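/*
 * The SQ doorbell built above packs two words: byte_4 carries the doorbell
 * QPN tag plus the HNS_ROCE_V2_SQ_DB command, and "parameter" carries the
 * new producer index (sq.head masked to twice the WQE count) together with
 * the service level, so a single 64-bit write both rings the queue and
 * publishes how far software has advanced.
 */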
static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				 struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_rinl_sge *sge_list;
	struct device *dev = hr_dev->dev;
	unsigned long flags;
	void *wqe = NULL;
	int ret = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	if (hr_qp->state == IB_QPS_RESET) {
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(hr_qp, ind);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
		for (i = 0; i < wr->num_sge; i++) {
			if (!wr->sg_list[i].length)
				continue;
			set_data_seg_v2(dseg, wr->sg_list + i);
			dseg++;
		}

		if (i < hr_qp->rq.max_gs) {
			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg->addr = 0;
		}

		/* rq support inline data */
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
			sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
			hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
							(u32)wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				sge_list[i].addr =
					       (void *)(u64)wr->sg_list[i].addr;
				sge_list[i].len = wr->sg_list[i].length;
			}
		}

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
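/*
 * When HNS_ROCE_CAP_FLAG_RQ_INLINE is advertised, the post-recv loop above
 * also records each posted SGE's address and length in rq_inl_buf; on
 * completion, hns_roce_handle_recv_inl_wqe() copies inline payload out of
 * the receive WQE into those saved buffers.
 */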
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}
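/*
 * Free-slot arithmetic for the command queue ring: with desc_num = 1024,
 * next_to_use = 10 and next_to_clean = 5, used = (10 - 5 + 1024) % 1024 = 5
 * and 1018 descriptors are reported free; one slot is always kept unused so
 * a full ring can be told apart from an empty one.
 */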
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
}
static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;

	ring->flag = ring_type;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	return hns_roce_alloc_cmq_desc(hr_dev, ring);
}
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == TYPE_CSQ) {
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
			   (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
			   HNS_ROCE_CMQ_ENABLE);
		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
	} else {
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
			   (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
			   HNS_ROCE_CMQ_ENABLE);
		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
	}
}
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	int ret;

	/* Setup the queue entries for command queue */
	priv->cmq.csq.desc_num = 1024;
	priv->cmq.crq.desc_num = 1024;

	/* Setup the lock for command queue */
	spin_lock_init(&priv->cmq.csq.lock);
	spin_lock_init(&priv->cmq.crq.lock);

	/* Setup Tx write back timeout */
	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	/* Init CSQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
		return ret;
	}

	/* Init CRQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
		goto err_crq;
	}

	/* Init CSQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

	/* Init CRQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

	return 0;

err_crq:
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

	return ret;
}
static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}
static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);

	return head == priv->cmq.csq.next_to_use;
}
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc;
	u16 ntc = csq->next_to_clean;
	u32 head;
	int clean = 0;

	desc = &csq->desc[ntc];
	head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	u16 desc_ret;
	int ret = 0;
	int ntc;

	if (hr_dev->is_reset)
		return 0;

	spin_lock_bh(&csq->lock);

	if (num > hns_roce_cmq_space(csq)) {
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	/*
	 * Record the location of desc in the cmq for this time
	 * which will be use for hardware to write back
	 */
	ntc = csq->next_to_use;

	while (handle < num) {
		desc_to_use = &csq->desc[csq->next_to_use];
		*desc_to_use = desc[handle];
		dev_dbg(hr_dev->dev, "set cmq desc:\n");
		csq->next_to_use++;
		if (csq->next_to_use == csq->desc_num)
			csq->next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

	/*
	 * If the command is sync, wait for the firmware to write back,
	 * if multi descriptors to be sent, use the first one to check
	 */
	if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
		do {
			if (hns_roce_cmq_csq_done(hr_dev))
				break;
			udelay(1);
			timeout++;
		} while (timeout < priv->cmq.tx_timeout);
	}

	if (hns_roce_cmq_csq_done(hr_dev)) {
		complete = true;
		handle = 0;
		while (handle < num) {
			/* get the result of hardware write back */
			desc_to_use = &csq->desc[ntc];
			desc[handle] = *desc_to_use;
			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
			desc_ret = desc[handle].retval;
			if (desc_ret == CMD_EXEC_SUCCESS)
				ret = 0;
			else
				ret = -EIO;
			priv->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == csq->desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		ret = -EAGAIN;

	/* clean the command send queue */
	handle = hns_roce_cmq_csq_clean(hr_dev);
	if (handle != num)
		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
			 handle, num);

	spin_unlock_bh(&csq->lock);

	return ret;
}
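/*
 * Command submission above is fully synchronous: the descriptors are copied
 * into the CSQ ring, the tail register is bumped once, and the driver then
 * polls ROCEE_TX_CMQ_HEAD_REG (via hns_roce_cmq_csq_done()) in 1 us steps
 * until the hardware head catches up with next_to_use or tx_timeout
 * iterations elapse, after which the ring is cleaned under the same lock.
 */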
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_version *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_version *)desc.data;
	hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);

	return 0;
}
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cfg_global_param *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	req = (struct hns_roce_cfg_global_param *)desc.data;
	memset(req, 0, sizeof(*req));
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_pf_res *res;
	int ret;
	int i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_RES, true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	res = (struct hns_roce_pf_res *)desc[0].data;

	hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
	hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
	hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
	hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

	return 0;
}
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_vf_res_a *req_a;
	struct hns_roce_vf_res_b *req_b;
	int i;

	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_ALLOC_VF_RES, false);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
				       HNS_ROCE_VF_QPC_BT_NUM);

			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
				       HNS_ROCE_VF_SRQC_BT_NUM);

			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
				       HNS_ROCE_VF_CQC_BT_NUM);

			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
				       HNS_ROCE_VF_MPT_BT_NUM);

			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_IDX_M,
				       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_NUM_M,
				       VF_RES_A_DATA_5_VF_EQC_NUM_S,
				       HNS_ROCE_VF_EQC_NUM);
		} else {
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
				       HNS_ROCE_VF_SMAC_NUM);

			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_IDX_M,
				       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_NUM_M,
				       VF_RES_B_DATA_2_VF_SGID_NUM_S,
				       HNS_ROCE_VF_SGID_NUM);

			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_QID_IDX_M,
				       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_SL_NUM_M,
				       VF_RES_B_DATA_3_VF_SL_NUM_S,
				       HNS_ROCE_VF_SL_NUM);
		}
	}

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	memset(req, 0, sizeof(*req));

	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	ret = hns_roce_cmq_query_hw_info(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_config_global_param(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
			ret);
		return ret;
	}

	/* Get pf resource owned by every pf */
	ret = hns_roce_query_pf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_alloc_vf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	hr_dev->vendor_part_id = 0;
	hr_dev->sys_image_guid = 0;

	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors	= HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors	= HNS_ROCE_V2_COMP_VEC_NUM;
	caps->num_other_vectors	= HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz	= HNS_ROCE_V2_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey	= 0;
	caps->reserved_pds	= 0;
	caps->reserved_mrws	= 1;
	caps->reserved_uars	= 0;
	caps->reserved_cqs	= 0;

	caps->qpc_ba_pg_sz	= 0;
	caps->qpc_buf_pg_sz	= 0;
	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_ba_pg_sz	= 0;
	caps->srqc_buf_pg_sz	= 0;
	caps->srqc_hop_num	= HNS_ROCE_HOP_NUM_0;
	caps->cqc_ba_pg_sz	= 0;
	caps->cqc_buf_pg_sz	= 0;
	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_ba_pg_sz	= 0;
	caps->mpt_buf_pg_sz	= 0;
	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->pbl_ba_pg_sz	= 0;
	caps->pbl_buf_pg_sz	= 0;
	caps->pbl_hop_num	= HNS_ROCE_PBL_HOP_NUM;
	caps->mtt_ba_pg_sz	= 0;
	caps->mtt_buf_pg_sz	= 0;
	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
	caps->cqe_ba_pg_sz	= 0;
	caps->cqe_buf_pg_sz	= 0;
	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
	caps->eqe_ba_pg_sz	= 0;
	caps->eqe_buf_pg_sz	= 0;
	caps->eqe_hop_num	= HNS_ROCE_EQE_HOP_NUM;
	caps->chunk_sz		= HNS_ROCE_V2_TABLE_CHUNK_SIZE;

	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
				  HNS_ROCE_CAP_FLAG_RECORD_DB;
	caps->pkey_table_len[0] = 1;
	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu = IB_MTU_4096;

	ret = hns_roce_v2_set_bt(hr_dev);
	if (ret)
		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
			ret);

	return ret;
}
static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);

	return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
}
static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);

	return status & HNS_ROCE_HW_MB_STATUS_MASK;
}
static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	struct device *dev = hr_dev->dev;
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
					   ROCEE_VF_MB_CFG0_REG);
	unsigned long end;
	u32 val0 = 0;
	u32 val1 = 0;

	end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
				(int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
		       HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
	roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
		       HNS_ROCE_VF_MB4_CMD_SHIFT, op);
	roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
		       HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
	roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
		       HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);

	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);

	/* Memory barrier */
	wmb();

	writel(val0, hcr + 4);
	writel(val1, hcr + 5);

	mmiowb();

	return 0;
}
static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	struct device *dev = hr_dev->dev;
	unsigned long end = 0;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v2_cmd_pending(hr_dev)) {
		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = hns_roce_v2_cmd_complete(hr_dev);
	if (status != 0x1) {
		dev_err(dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
	u32 *p;
	u32 val;

	if (!gid || !attr)
		return -EINVAL;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		sgid_type = GID_TYPE_FLAG_ROCE_V1;

	if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
		if (ipv6_addr_v4mapped((void *)gid))
			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
		else
			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
	}

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG0_REG +
		       0x20 * gid_index);

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG1_REG +
		       0x20 * gid_index);

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG2_REG +
		       0x20 * gid_index);

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG3_REG +
		       0x20 * gid_index);

	val = roce_read(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index);
	roce_set_field(val, ROCEE_VF_SGID_CFG4_SGID_TYPE_M,
		       ROCEE_VF_SGID_CFG4_SGID_TYPE_S, sgid_type);

	roce_write(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index, val);

	return 0;
}
static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u16 reg_smac_h;
	u32 reg_smac_l;
	u32 val;

	reg_smac_l = *(u32 *)(&addr[0]);
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_VF_SMAC_CFG0_REG +
		       0x08 * phy_port);
	val = roce_read(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port);

	reg_smac_h = *(u16 *)(&addr[4]);
	roce_set_field(val, ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M,
		       ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S, reg_smac_h);
	roce_write(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port, val);

	return 0;
}
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;
	struct scatterlist *sg;
	u64 page_addr;
	u64 *pages;
	int i, j;
	int len;
	int entry;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);
	mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
		     mr->type == MR_TYPE_MR ? 0 : 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
		     1);
	mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);

	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
	mpt_entry->lkey = cpu_to_le32(mr->key);
	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));

	if (mr->type == MR_TYPE_DMA)
		return 0;

	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);

	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
		       V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(mr->pbl_ba >> 3));
	mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);

	pages = (u64 *)__get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = 0;
	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (j = 0; j < len; ++j) {
			page_addr = sg_dma_address(sg) +
				    (j << mr->umem->page_shift);
			pages[i] = page_addr >> 6;

			/* Record the first 2 entry directly to MTPT table */
			if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
				goto found;
			i++;
		}
	}

found:
	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
		       V2_MPT_BYTE_56_PA0_H_S,
		       upper_32_bits(pages[0]));
	mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);

	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));

	free_page((unsigned long)pages);

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
	mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);

	return 0;
}
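/*
 * Two details worth noting in the MPT programming above: the PBL base
 * address is stored right-shifted by 3 (an 8-byte aligned value), and only
 * the first couple of page addresses, each shifted right by 6, are cached
 * directly in the entry as PA0/PA1 (bounded by
 * HNS_ROCE_V2_MAX_INNER_MTPT_NUM); the remaining pages are reached through
 * the PBL itself.
 */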
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
					struct hns_roce_mr *mr, int flags,
					u32 pdn, int mr_access_flags, u64 iova,
					u64 size, void *mb_buf)
{
	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;

	if (flags & IB_MR_REREG_PD) {
		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
			       V2_MPT_BYTE_4_PD_S, pdn);
		mr->pd = pdn;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_BIND_EN_S,
			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_ATOMIC_EN_S,
			     (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
			     (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
			     (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
			     (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	}

	if (flags & IB_MR_REREG_TRANS) {
		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));

		mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
		mpt_entry->pbl_ba_l =
				cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
		roce_set_field(mpt_entry->byte_48_mode_ba,
			       V2_MPT_BYTE_48_PBL_BA_H_M,
			       V2_MPT_BYTE_48_PBL_BA_H_S,
			       upper_32_bits(mr->pbl_ba >> 3));
		mpt_entry->byte_48_mode_ba =
				cpu_to_le32(mpt_entry->byte_48_mode_ba);

		mr->iova = iova;
		mr->size = size;
	}

	return 0;
}
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
				   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);

	/* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
	return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
		!!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
}
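/*
 * Ownership check, spelled out: ib_cq.cqe is the CQ depth minus one, so
 * (n & (hr_cq->ib_cq.cqe + 1)) isolates the wrap-count parity bit of the
 * consumer index.  A CQE is handed to software only while its owner bit
 * differs from that parity; e.g. with a depth of 64 the test reads bit 6 of
 * cons_index, so once the ring wraps the comparison flips and stale entries
 * from the previous pass are ignored.
 */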
static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
}
*hr_cq
, u32 cons_index
)
1554 *hr_cq
->set_ci_db
= cons_index
& 0xffffff;
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_v2_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
				    V2_CQE_BYTE_16_LCL_QPN_S) &
				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
			/* In v1 engine, not support SRQ */
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
					  hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->byte_4,
						 V2_CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_v2_cq_context *cq_context;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
		       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
		       V2_CQC_BYTE_4_CEQN_S, vector);
	cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);

	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);

	cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	cq_context->cqe_cur_blk_addr =
				cpu_to_le32(cq_context->cqe_cur_blk_addr);

	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
		       cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);

	cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
		       cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
		       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
		       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);

	cq_context->cqe_ba = (u32)(dma_handle >> 3);

	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));

	if (hr_cq->db_en)
		roce_set_bit(cq_context->byte_44_db_record,
			     V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);

	roce_set_field(cq_context->byte_44_db_record,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
		       ((u32)hr_cq->db.dma) >> 1);
	cq_context->db_record_addr = hr_cq->db.dma >> 32;

	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M,
		       V2_CQC_BYTE_56_CQ_PERIOD_S,
		       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	u32 notification_flag;
	u32 doorbell[2];

	doorbell[0] = 0;
	doorbell[1] = 0;

	notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
			     V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
	/*
	 * flags = 0; Notification Flag = 1, next
	 * flags = 1; Notification Flag = 0, solocited
	 */
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
		       hr_cq->cqn);
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
		       HNS_ROCE_V2_CQ_DB_NTR);
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
		       hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
		       V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
	roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
		     notification_flag);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);

	return 0;
}
static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
					struct hns_roce_qp **cur_qp,
					struct ib_wc *wc)
{
	struct hns_roce_rinl_sge *sge_list;
	u32 wr_num, wr_cnt, sge_num;
	u32 sge_cnt, data_len, size;
	void *wqe_buf;

	wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
				V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
	wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);

	sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
	sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
	wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
	data_len = wc->byte_len;

	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
		size = min(sge_list[sge_cnt].len, data_len);
		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);

		data_len -= size;
		wqe_buf += size;
	}

	if (data_len) {
		wc->status = IB_WC_LOC_LEN_ERR;
		return -EAGAIN;
	}

	return 0;
}
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	struct hns_roce_dev *hr_dev;
	struct hns_roce_v2_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	int is_send;
	u16 wqe_ctr;
	u32 opcode;
	u32 status;
	int qpn;
	int ret;

	/* Find cqe according to consumer index */
	cqe = next_cqe_sw_v2(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/* Memory barrier */
	rmb();

	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);

	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
			     V2_CQE_BYTE_16_LCL_QPN_S);

	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_dev = to_hr_dev(hr_cq->ib_cq.device);
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
			return -EINVAL;
		}
		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
				V2_CQE_BYTE_4_STATUS_S);
	switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
	case HNS_ROCE_CQE_V2_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_V2_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	/* CQE status error, directly return */
	if (wc->status != IB_WC_SUCCESS)
		return 0;
	if (is_send) {
		wc->wc_flags = 0;
		/* SQ corresponding to CQE */
		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
				       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
		case HNS_ROCE_SQ_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
			wc->opcode = IB_WC_RDMA_WRITE;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case HNS_ROCE_SQ_OPCODE_BIND_MW:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sg_signal_bit is 1,
			 * firstly tail pointer updated to wqe
			 * which current cqe correspond to
			 */
			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
						      V2_CQE_BYTE_4_WQE_INDX_M,
						      V2_CQE_BYTE_4_WQE_INDX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}

		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else {
1926 wc
->byte_len
= le32_to_cpu(cqe
->byte_cnt
);
1928 opcode
= roce_get_field(cqe
->byte_4
, V2_CQE_BYTE_4_OPCODE_M
,
1929 V2_CQE_BYTE_4_OPCODE_S
);
1930 switch (opcode
& 0x1f) {
1931 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM
:
1932 wc
->opcode
= IB_WC_RECV_RDMA_WITH_IMM
;
1933 wc
->wc_flags
= IB_WC_WITH_IMM
;
1934 wc
->ex
.imm_data
= cqe
->immtdata
;
1936 case HNS_ROCE_V2_OPCODE_SEND
:
1937 wc
->opcode
= IB_WC_RECV
;
1940 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM
:
1941 wc
->opcode
= IB_WC_RECV
;
1942 wc
->wc_flags
= IB_WC_WITH_IMM
;
1943 wc
->ex
.imm_data
= cqe
->immtdata
;
1945 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV
:
1946 wc
->opcode
= IB_WC_RECV
;
1947 wc
->wc_flags
= IB_WC_WITH_INVALIDATE
;
1948 wc
->ex
.invalidate_rkey
= le32_to_cpu(cqe
->rkey
);
1951 wc
->status
= IB_WC_GENERAL_ERR
;
1955 if ((wc
->qp
->qp_type
== IB_QPT_RC
||
1956 wc
->qp
->qp_type
== IB_QPT_UC
) &&
1957 (opcode
== HNS_ROCE_V2_OPCODE_SEND
||
1958 opcode
== HNS_ROCE_V2_OPCODE_SEND_WITH_IMM
||
1959 opcode
== HNS_ROCE_V2_OPCODE_SEND_WITH_INV
) &&
1960 (roce_get_bit(cqe
->byte_4
, V2_CQE_BYTE_4_RQ_INLINE_S
))) {
1961 ret
= hns_roce_handle_recv_inl_wqe(cqe
, cur_qp
, wc
);
1966 /* Update tail pointer, record wr_id */
1967 wq
= &(*cur_qp
)->rq
;
1968 wc
->wr_id
= wq
->wrid
[wq
->tail
& (wq
->wqe_cnt
- 1)];
1971 wc
->sl
= (u8
)roce_get_field(cqe
->byte_32
, V2_CQE_BYTE_32_SL_M
,
1972 V2_CQE_BYTE_32_SL_S
);
1973 wc
->src_qp
= (u8
)roce_get_field(cqe
->byte_32
,
1974 V2_CQE_BYTE_32_RMT_QPN_M
,
1975 V2_CQE_BYTE_32_RMT_QPN_S
);
1976 wc
->wc_flags
|= (roce_get_bit(cqe
->byte_32
,
1977 V2_CQE_BYTE_32_GRH_S
) ?
1979 wc
->port_num
= roce_get_field(cqe
->byte_32
,
1980 V2_CQE_BYTE_32_PORTN_M
, V2_CQE_BYTE_32_PORTN_S
);
1982 memcpy(wc
->smac
, cqe
->smac
, 4);
1983 wc
->smac
[4] = roce_get_field(cqe
->byte_28
,
1984 V2_CQE_BYTE_28_SMAC_4_M
,
1985 V2_CQE_BYTE_28_SMAC_4_S
);
1986 wc
->smac
[5] = roce_get_field(cqe
->byte_28
,
1987 V2_CQE_BYTE_28_SMAC_5_M
,
1988 V2_CQE_BYTE_28_SMAC_5_S
);
1989 wc
->vlan_id
= 0xffff;
1990 wc
->wc_flags
|= (IB_WC_WITH_VLAN
| IB_WC_WITH_SMAC
);
1991 wc
->network_hdr_type
= roce_get_field(cqe
->byte_28
,
1992 V2_CQE_BYTE_28_PORT_TYPE_M
,
1993 V2_CQE_BYTE_28_PORT_TYPE_S
);
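/*
 * Poll up to @num_entries completions from one CQ. Polling stops as soon
 * as hns_roce_v2_poll_one() runs out of valid CQEs, and the consumer
 * index is written back to hardware only if at least one CQE was
 * consumed.
 */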
static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
			       struct ib_wc *wc)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct hns_roce_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;

	spin_lock_irqsave(&hr_cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
			break;
	}

	if (npolled) {
		/* Memory barrier */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}

	spin_unlock_irqrestore(&hr_cq->lock, flags);

	return npolled;
}

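/*
 * Program one step of a HEM base-address table (QPC/MPT/CQC/SRQC) into
 * hardware through the mailbox interface. The mailbox opcode is derived
 * from the table type plus the step index, and the base address used
 * depends on the hop number of the multi-hop addressing scheme.
 */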
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_table *table, int obj,
			       int step_idx)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_hem_iter iter;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	unsigned long mhop_obj = obj;
	int i, j, k;
	int ret = 0;
	u64 hem_idx = 0;
	u64 l1_idx = 0;
	u64 bt_ba = 0;
	u32 chunk_ba_num;
	u32 hop_num;
	u16 op = 0xff;

	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
		return 0;

	hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	i = mhop.l0_idx;
	j = mhop.l1_idx;
	k = mhop.l2_idx;
	hop_num = mhop.hop_num;
	chunk_ba_num = mhop.bt_chunk_size / 8;

	if (hop_num == 2) {
		hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
			  k;
		l1_idx = i * chunk_ba_num + j;
	} else if (hop_num == 1) {
		hem_idx = i * chunk_ba_num + j;
	} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
		hem_idx = i;
	}

	switch (table->type) {
	case HEM_TYPE_QPC:
		op = HNS_ROCE_CMD_WRITE_QPC_BT0;
		break;
	case HEM_TYPE_MTPT:
		op = HNS_ROCE_CMD_WRITE_MPT_BT0;
		break;
	case HEM_TYPE_CQC:
		op = HNS_ROCE_CMD_WRITE_CQC_BT0;
		break;
	case HEM_TYPE_SRQC:
		op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
		break;
	default:
		dev_warn(dev, "Table %d not to be written by mailbox!\n",
			 table->type);
		return 0;
	}
	op += step_idx;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (check_whether_last_step(hop_num, step_idx)) {
		hem = table->hem[hem_idx];
		for (hns_roce_hem_first(hem, &iter);
		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
			bt_ba = hns_roce_hem_addr(&iter);

			/* configure the ba, tag, and op */
			ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
						obj, 0, op,
						HNS_ROCE_CMD_TIMEOUT_MSECS);
		}
	} else {
		if (step_idx == 0)
			bt_ba = table->bt_l0_dma_addr[i];
		else if (step_idx == 1 && hop_num == 2)
			bt_ba = table->bt_l1_dma_addr[l1_idx];

		/* configure the ba, tag, and op */
		ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
					0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

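/*
 * Tear down a HEM base-address table step. Only the destroy opcode and
 * the object index are needed; no base address is passed to hardware.
 */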
static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_table *table, int obj,
				 int step_idx)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret = 0;
	u16 op = 0xff;

	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
		return 0;

	switch (table->type) {
	case HEM_TYPE_QPC:
		op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
		break;
	case HEM_TYPE_MTPT:
		op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
		break;
	case HEM_TYPE_CQC:
		op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
		break;
	case HEM_TYPE_SRQC:
		op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
		break;
	default:
		dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
			 table->type);
		return 0;
	}
	op += step_idx;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* configure the tag and op */
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

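/*
 * Hand a QP context and its mask (laid out back to back in the mailbox
 * buffer) to hardware with the MODIFY_QPC mailbox command.
 */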
static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
				 struct hns_roce_mtt *mtt,
				 enum ib_qp_state cur_state,
				 enum ib_qp_state new_state,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, context, sizeof(*context) * 2);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
				HNS_ROCE_CMD_MODIFY_QPC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

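/*
 * Translate the IB remote access flags (or the values cached in hr_qp
 * when the attribute mask does not carry them) into the RRE/RWE/ATE bits
 * of the QP context and clear the matching mask bits so that hardware
 * picks up the new values.
 */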
static void set_access_flags(struct hns_roce_qp *hr_qp,
			     struct hns_roce_v2_qp_context *context,
			     struct hns_roce_v2_qp_context *qpc_mask,
			     const struct ib_qp_attr *attr, int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;

	dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
			 attr->max_dest_rd_atomic : hr_qp->resp_depth;

	access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
		       attr->qp_access_flags : hr_qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
		     !!(access_flags & IB_ACCESS_REMOTE_READ));
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);

	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
		     !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);

	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
		     !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
}

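/*
 * Build the QP context for the RESET->INIT transition: QP type and SGE
 * shift, PD, WQE shifts, RQ doorbell record address, CQ/SRQ numbers and
 * access bits, plus mask clearing for the many fields that must simply
 * be zeroed in hardware.
 */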
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
				    const struct ib_qp_attr *attr,
				    int attr_mask,
				    struct hns_roce_v2_qp_context *context,
				    struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, 0);

	if (ibqp->qp_type == IB_QPT_GSI)
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
	else
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       hr_qp->sq.max_gs > 2 ?
			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);

	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);

	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
		       V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
		       V2_QPC_BYTE_20_RQWS_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);

	/* No VLAN need to set 0xFFF */
	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
		       V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
		       V2_QPC_BYTE_24_VLAN_IDX_S, 0);

	/*
	 * Set some fields in context to zero. Because the default values of
	 * all fields in context are zero, we need not set them to 0 again,
	 * but we should set the relevant fields of context mask to 0.
	 */
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);

	roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
		       V2_QPC_BYTE_60_MAPID_S, 0);

	roce_set_bit(qpc_mask->byte_60_qpst_mapid,
		     V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);

	if (attr_mask & IB_QP_QKEY) {
		context->qkey_xrcd = attr->qkey;
		qpc_mask->qkey_xrcd = 0;
		hr_qp->qkey = attr->qkey;
	}

	if (hr_qp->rdb_en) {
		roce_set_bit(context->byte_68_rq_db,
			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
		roce_set_bit(qpc_mask->byte_68_rq_db,
			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
	}

	roce_set_field(context->byte_68_rq_db,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
		       ((u32)hr_qp->rdb.dma) >> 1);
	roce_set_field(qpc_mask->byte_68_rq_db,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
	context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
	qpc_mask->rq_db_record_addr = 0;

	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
		     (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);

	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, 0);

	if (ibqp->srq) {
		roce_set_field(context->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
			       to_hr_srq(ibqp->srq)->srqn);
		roce_set_field(qpc_mask->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
	}

	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);

	roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
		       V2_QPC_BYTE_92_SRQ_INFO_S, 0);

	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);

	roce_set_field(qpc_mask->byte_104_rq_sge,
		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);

	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);

	qpc_mask->rq_rnr_timer = 0;
	qpc_mask->rx_msg_len = 0;
	qpc_mask->rx_rkey_pkt_info = 0;
	qpc_mask->rx_va = 0;

	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);

	roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
		       V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
		       V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);

	roce_set_field(qpc_mask->byte_144_raq,
		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
	roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
		     0);
	roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
		       V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
	roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);

	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
		       V2_QPC_BYTE_148_RQ_MSN_S, 0);
	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
		       V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);

	roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
		       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
	roce_set_field(qpc_mask->byte_152_raq,
		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);

	roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
		       V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);

	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);

	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);

	roce_set_bit(qpc_mask->byte_168_irrl_idx,
		     V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
	roce_set_bit(qpc_mask->byte_168_irrl_idx,
		     V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
		       V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);

	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
	roce_set_field(qpc_mask->byte_172_sq_psn,
		       V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);

	roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
		     0);

	roce_set_field(qpc_mask->byte_176_msg_pktn,
		       V2_QPC_BYTE_176_MSG_USE_PKTN_M,
		       V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
	roce_set_field(qpc_mask->byte_176_msg_pktn,
		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);

	roce_set_field(qpc_mask->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
		       V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);

	qpc_mask->cur_sge_offset = 0;

	roce_set_field(qpc_mask->byte_192_ext_sge,
		       V2_QPC_BYTE_192_CUR_SGE_IDX_M,
		       V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
	roce_set_field(qpc_mask->byte_192_ext_sge,
		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);

	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);

	roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
		       V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
	roce_set_field(qpc_mask->byte_200_sq_max,
		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);

	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);

	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);

	qpc_mask->sq_timer = 0;

	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);

	qpc_mask->irrl_cur_sge_offset = 0;

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);

	roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
		     0);
	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
		     0);
	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
		     0);

	hr_qp->access_flags = attr->qp_access_flags;
	hr_qp->pkey_index = attr->pkey_index;
	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, 0);

	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
		       V2_QPC_BYTE_252_ERR_TYPE_S, 0);

	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
		       V2_QPC_BYTE_256_RQ_CQE_IDX_M,
		       V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
}

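/*
 * INIT->INIT refreshes only a subset of the context: QP type, SGE shift,
 * remote access bits, WQE shifts, PD, CQ/SRQ numbers and the destination
 * QPN when requested.
 */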
static void modify_qp_init_to_init(struct ib_qp *ibqp,
				   const struct ib_qp_attr *attr, int attr_mask,
				   struct hns_roce_v2_qp_context *context,
				   struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, 0);

	if (ibqp->qp_type == IB_QPT_GSI)
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
	else
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);

	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
	} else {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
	}

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, 0);

	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, 0);

	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, 0);

	if (ibqp->srq) {
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
		roce_set_field(context->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
			       to_hr_srq(ibqp->srq)->srqn);
		roce_set_field(qpc_mask->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
	}

	if (attr_mask & IB_QP_QKEY) {
		context->qkey_xrcd = attr->qkey;
		qpc_mask->qkey_xrcd = 0;
	}

	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, 0);

	if (attr_mask & IB_QP_DEST_QPN) {
		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
			       V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
		roce_set_field(qpc_mask->byte_56_dqpn_err,
			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
	}
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
}

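/*
 * INIT->RTR programs the receiver side of the connection: the WQE/SGE
 * base address and hop numbers looked up from the MTT, IRRL/TRRL base
 * addresses, destination MAC and GID, MTU, expected PSN and responder
 * resources. It fails if the MTT/IRRL/TRRL entries cannot be found or if
 * an alternate path is requested.
 */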
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr, int attr_mask,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_v2_qp_context *qpc_mask)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle_3;
	dma_addr_t dma_handle_2;
	dma_addr_t dma_handle;
	u32 page_size;
	u8 port_num;
	u64 *mtts_3;
	u64 *mtts_2;
	u64 *mtts;
	u8 *dmac;
	u8 *smac;
	int port;

	/* Search qp buf's mtts */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		return -EINVAL;
	}

	/* Search IRRL's mtts */
	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
				     hr_qp->qpn, &dma_handle_2);
	if (!mtts_2) {
		dev_err(dev, "qp irrl_table find failed\n");
		return -EINVAL;
	}

	/* Search TRRL's mtts */
	mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
				     hr_qp->qpn, &dma_handle_3);
	if (!mtts_3) {
		dev_err(dev, "qp trrl_table find failed\n");
		return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
		return -EINVAL;
	}

	dmac = (u8 *)attr->ah_attr.roce.dmac;
	context->wqe_sge_ba = (u32)(dma_handle >> 3);
	qpc_mask->wqe_sge_ba = 0;

	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
		       V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
		       V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);

	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
		       V2_QPC_BYTE_12_SQ_HOP_NUM_S,
		       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
		       0 : hr_dev->caps.mtt_hop_num);
	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
		       V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_S,
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
		       hr_dev->caps.mtt_hop_num : 0);
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_S,
		       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
		       0 : hr_dev->caps.mtt_hop_num);
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
		       hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
		       hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);

	roce_set_field(context->byte_80_rnr_rx_cqn,
		       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
		       V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
		       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
		       V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);

	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
				    >> PAGE_ADDR_SHIFT);
	qpc_mask->rq_cur_blk_addr = 0;

	roce_set_field(context->byte_92_srq_info,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
		       mtts[hr_qp->rq.offset / page_size]
		       >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(qpc_mask->byte_92_srq_info,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);

	context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
				    >> PAGE_ADDR_SHIFT);
	qpc_mask->rq_nxt_blk_addr = 0;

	roce_set_field(context->byte_104_rq_sge,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
		       mtts[hr_qp->rq.offset / page_size + 1]
		       >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(qpc_mask->byte_104_rq_sge,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);

	roce_set_field(context->byte_108_rx_reqepsn,
		       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
		       V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
		       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
		       V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);

	roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
		       V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
		       V2_QPC_BYTE_132_TRRL_BA_S, 0);
	context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
	qpc_mask->trrl_ba = 0;
	roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
		       V2_QPC_BYTE_140_TRRL_BA_S,
		       (u32)(dma_handle_3 >> (32 + 16 + 4)));
	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
		       V2_QPC_BYTE_140_TRRL_BA_S, 0);

	context->irrl_ba = (u32)(dma_handle_2 >> 6);
	qpc_mask->irrl_ba = 0;
	roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
		       V2_QPC_BYTE_208_IRRL_BA_S,
		       dma_handle_2 >> (32 + 6));
	roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
		       V2_QPC_BYTE_208_IRRL_BA_S, 0);

	roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);

	roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
		     hr_qp->sq_signal_bits);
	roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
		     0);

	port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;

	smac = (u8 *)hr_dev->dev_addr[port];
	/* when dmac equals smac or loop_idc is 1, it should loopback */
	if (ether_addr_equal_unaligned(dmac, smac) ||
	    hr_dev->loop_idc == 0x1) {
		roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
		roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
	}

	if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
	     attr->max_dest_rd_atomic) {
		roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
			       V2_QPC_BYTE_140_RR_MAX_S,
			       fls(attr->max_dest_rd_atomic - 1));
		roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
			       V2_QPC_BYTE_140_RR_MAX_S, 0);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
			       V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
		roce_set_field(qpc_mask->byte_56_dqpn_err,
			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
	}

	/* Configure GID index */
	port_num = rdma_ah_get_port_num(&attr->ah_attr);
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGID_IDX_M,
		       V2_QPC_BYTE_20_SGID_IDX_S,
		       hns_get_gid_index(hr_dev, port_num - 1,
					 grh->sgid_index));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGID_IDX_M,
		       V2_QPC_BYTE_20_SGID_IDX_S, 0);
	memcpy(&(context->dmac), dmac, 4);
	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));

	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
		       V2_QPC_BYTE_52_DMAC_S, 0);

	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);

	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
		       V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
		       V2_QPC_BYTE_24_HOP_LIMIT_S, 0);

	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
		       V2_QPC_BYTE_28_FL_S, grh->flow_label);
	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
		       V2_QPC_BYTE_28_FL_S, 0);

	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
		       V2_QPC_BYTE_24_TC_S, grh->traffic_class);
	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
		       V2_QPC_BYTE_24_TC_S, 0);

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
			       V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
	else if (attr_mask & IB_QP_PATH_MTU)
		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
			       V2_QPC_BYTE_24_MTU_S, attr->path_mtu);

	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
		       V2_QPC_BYTE_24_MTU_S, 0);

	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));

	roce_set_field(context->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);

	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);

	context->rq_rnr_timer = 0;
	qpc_mask->rq_rnr_timer = 0;

	roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
		       V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
	roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
		       V2_QPC_BYTE_152_RAQ_PSN_S, 0);

	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);

	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);

	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S, 0);
	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);

	return 0;
}

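/*
 * RTR->RTS programs the sender side: current SQ and extended-SGE block
 * addresses from the MTT, SQ PSNs, retry and RNR counters, local ack
 * timeout and service level. Alternate path and path migration are not
 * supported and are rejected.
 */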
static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
				const struct ib_qp_attr *attr, int attr_mask,
				struct hns_roce_v2_qp_context *context,
				struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle;
	u32 page_size;
	u64 *mtts;

	/* Search qp buf's mtts */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		return -EINVAL;
	}

	/* Not support alternate path and path migration */
	if ((attr_mask & IB_QP_ALT_PATH) ||
	    (attr_mask & IB_QP_PATH_MIG_STATE)) {
		dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
		return -EINVAL;
	}

	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_60_qpst_mapid,
		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
	roce_set_field(qpc_mask->byte_60_qpst_mapid,
		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);

	context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
	qpc_mask->sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);

	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	context->sq_cur_sge_blk_addr =
		((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
		((u32)(mtts[hr_qp->sge.offset / page_size]
		>> PAGE_ADDR_SHIFT)) : 0;
	roce_set_field(context->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
		       (mtts[hr_qp->sge.offset / page_size] >>
		       (32 + PAGE_ADDR_SHIFT)) : 0);
	qpc_mask->sq_cur_sge_blk_addr = 0;
	roce_set_field(qpc_mask->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);

	context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	roce_set_field(context->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
	qpc_mask->rx_sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);

	/*
	 * Set some fields in context to zero. Because the default values of
	 * all fields in context are zero, we need not set them to 0 again,
	 * but we should set the relevant fields of context mask to 0.
	 */
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);

	roce_set_field(context->byte_244_rnr_rxack,
		       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
		       V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_244_rnr_rxack,
		       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
		       V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);

	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_IRRL_PSN_M,
		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);

	roce_set_field(context->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);

	roce_set_field(context->byte_224_retry_msg,
		       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
		       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
	roce_set_field(qpc_mask->byte_224_retry_msg,
		       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
		       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);

	roce_set_field(context->byte_224_retry_msg,
		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_224_retry_msg,
		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);

	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);

	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);

	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);

	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
		       V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
		       V2_QPC_BYTE_212_RETRY_CNT_S, 0);

	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
		       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
		       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);

	roce_set_field(context->byte_244_rnr_rxack,
		       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
		       V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
	roce_set_field(qpc_mask->byte_244_rnr_rxack,
		       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
		       V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);

	roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
		       V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
	roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
		       V2_QPC_BYTE_244_RNR_CNT_S, 0);

	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0x100);
	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0);

	if (attr_mask & IB_QP_TIMEOUT) {
		roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
			       V2_QPC_BYTE_28_AT_S, attr->timeout);
		roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
			       V2_QPC_BYTE_28_AT_S, 0);
	}

	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S,
		       rdma_ah_get_sl(&attr->ah_attr));
	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S, 0);
	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);

	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
		       V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
		       V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);

	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
	roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
		       V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
		       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);

	if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
		roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S,
			       fls(attr->max_rd_atomic - 1));
		roce_set_field(qpc_mask->byte_208_irrl,
			       V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S, 0);
	}

	return 0;
}

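/*
 * Top level of the QP state machine. A context/mask pair is allocated,
 * filled by the helper matching the requested transition and passed to
 * hardware through the mailbox. On success the software state and cached
 * attributes are updated; a transition to RESET additionally cleans the
 * CQs and rewinds the software queue pointers.
 */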
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr,
				 int attr_mask, enum ib_qp_state cur_state,
				 enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context *context;
	struct hns_roce_v2_qp_context *qpc_mask;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;

	context = kcalloc(2, sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	qpc_mask = context + 1;
	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 0 at the same time, else set them to 0x1.
	 */
	memset(qpc_mask, 0xff, sizeof(*qpc_mask));
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
					qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		modify_qp_init_to_init(ibqp, attr, attr_mask, context,
				       qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
					    qpc_mask);
		if (ret)
			goto out;
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
					   qpc_mask);
		if (ret)
			goto out;
	} else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
		   (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
		/* Nothing */
		;
	} else {
		dev_err(dev, "Illegal state for QP!\n");
		goto out;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);

	/* Every status migrate must change state */
	roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, new_state);
	roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, 0);

	/* SW pass context to HW */
	ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
				    context, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
		goto out;
	}

	hr_qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		hr_qp->atomic_rd_en = attr->qp_access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		hr_qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		hr_qp->port = attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
		hr_qp->next_sge = 0;
		if (hr_qp->rq.wqe_cnt)
			*hr_qp->rdb.db_record = 0;
	}

out:
	kfree(context);
	return ret;
}

static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
	switch (state) {
	case HNS_ROCE_QP_ST_RST:	 return IB_QPS_RESET;
	case HNS_ROCE_QP_ST_INIT:	 return IB_QPS_INIT;
	case HNS_ROCE_QP_ST_RTR:	 return IB_QPS_RTR;
	case HNS_ROCE_QP_ST_RTS:	 return IB_QPS_RTS;
	case HNS_ROCE_QP_ST_SQ_DRAINING:
	case HNS_ROCE_QP_ST_SQD:	 return IB_QPS_SQD;
	case HNS_ROCE_QP_ST_SQER:	 return IB_QPS_SQE;
	case HNS_ROCE_QP_ST_ERR:	 return IB_QPS_ERR;
	default:			 return -1;
	}
}

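/*
 * Read the raw QP context back from hardware into @hr_context using the
 * QUERY_QPC mailbox command.
 */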
static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
				 struct hns_roce_qp *hr_qp,
				 struct hns_roce_v2_qp_context *hr_context)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
				HNS_ROCE_CMD_QUERY_QPC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret) {
		dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
		goto out;
	}

	memcpy(hr_context, mailbox->buf, sizeof(*hr_context));

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

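/*
 * ib_query_qp() backend: fetch the QPC from hardware and translate it
 * back into ib_qp_attr fields (state, MTU, PSNs, access flags, address
 * vector, retry counters and queue capabilities).
 */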
static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context *context;
	struct device *dev = hr_dev->dev;
	int tmp_qp_state;
	int state;
	int ret;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		ret = 0;
		goto done;
	}

	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
	if (ret) {
		dev_err(dev, "query qpc error\n");
		ret = -EINVAL;
		goto out;
	}

	state = roce_get_field(context->byte_60_qpst_mapid,
			       V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
	if (tmp_qp_state == -1) {
		dev_err(dev, "Illegal ib_qp_state\n");
		ret = -EINVAL;
		goto out;
	}
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
							V2_QPC_BYTE_24_MTU_M,
							V2_QPC_BYTE_24_MTU_S);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = V2_QKEY_VAL;

	qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
	qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_M,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_S);
	qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
						  V2_QPC_BYTE_56_DQPN_M,
						  V2_QPC_BYTE_56_DQPN_S);
	qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_RRE_S)) << 2) |
				   ((roce_get_bit(context->byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_RWE_S)) << 1) |
				   ((roce_get_bit(context->byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_ATE_S)) << 3);
	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
				rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       roce_get_field(context->byte_28_at_fl,
					      V2_QPC_BYTE_28_SL_M,
					      V2_QPC_BYTE_28_SL_S));
		grh->flow_label = roce_get_field(context->byte_28_at_fl,
						 V2_QPC_BYTE_28_FL_M,
						 V2_QPC_BYTE_28_FL_S);
		grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
						 V2_QPC_BYTE_20_SGID_IDX_M,
						 V2_QPC_BYTE_20_SGID_IDX_S);
		grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
						V2_QPC_BYTE_24_HOP_LIMIT_M,
						V2_QPC_BYTE_24_HOP_LIMIT_S);
		grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
						    V2_QPC_BYTE_24_TC_M,
						    V2_QPC_BYTE_24_TC_S);

		memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
	}

	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
						     V2_QPC_BYTE_208_SR_MAX_M,
						     V2_QPC_BYTE_208_SR_MAX_S);
	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
							  V2_QPC_BYTE_140_RR_MAX_M,
							  V2_QPC_BYTE_140_RR_MAX_S);
	qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
						    V2_QPC_BYTE_80_MIN_RNR_TIME_M,
						    V2_QPC_BYTE_80_MIN_RNR_TIME_S);
	qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
					      V2_QPC_BYTE_28_AT_M,
					      V2_QPC_BYTE_28_AT_S);
	qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
					    V2_QPC_BYTE_212_RETRY_CNT_M,
					    V2_QPC_BYTE_212_RETRY_CNT_S);
	qp_attr->rnr_retry = context->rq_rnr_timer;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->cap = qp_attr->cap;
	ret = 0;

out:
	mutex_unlock(&hr_qp->mutex);
	kfree(context);

	return ret;
}

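/*
 * Common teardown for kernel and user space QPs: move the QP to RESET,
 * detach it from its CQs, free the QPN and MTT, and release either the
 * kernel queue buffers or the user doorbell/umem depending on @is_user.
 */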
static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
					 struct hns_roce_qp *hr_qp,
					 int is_user)
{
	struct hns_roce_cq *send_cq, *recv_cq;
	struct device *dev = hr_dev->dev;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
		/* Modify qp to reset before destroying qp */
		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
					    hr_qp->state, IB_QPS_RESET);
		if (ret) {
			dev_err(dev, "modify QP %06lx to ERR failed.\n",
				hr_qp->qpn);
			return ret;
		}
	}

	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);

	hns_roce_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
	}

	hns_roce_qp_remove(hr_dev, hr_qp);

	hns_roce_unlock_cqs(send_cq, recv_cq);

	hns_roce_qp_free(hr_dev, hr_qp);

	/* Not special_QP, free their QPN */
	if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
	    (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
	    (hr_qp->ibqp.qp_type == IB_QPT_UD))
		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);

	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

	if (is_user) {
		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
			hns_roce_db_unmap_user(
				to_hr_ucontext(hr_qp->ibqp.uobject->context),
				&hr_qp->rdb);
		ib_umem_release(hr_qp->umem);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
		if (hr_qp->rq.wqe_cnt)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
		kfree(hr_qp->rq_inl_buf.wqe_list);
	}

	return 0;
}

static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int ret;

	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
	if (ret) {
		dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
		return ret;
	}

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		kfree(hr_to_hr_sqp(hr_qp));
	else
		kfree(hr_qp);

	return 0;
}

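/*
 * Update the CQ moderation parameters (maximum coalesced CQE count and
 * coalescing period) through the MODIFY_CQC mailbox command.
 */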
static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
	struct hns_roce_v2_cq_context *cq_context;
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	struct hns_roce_v2_cq_context *cqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;

	memset(cqc_mask, 0xff, sizeof(*cqc_mask));

	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       cq_count);
	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       0);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
		       cq_period);
	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
		       0);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
				HNS_ROCE_CMD_MODIFY_CQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");

	return ret;
}

static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
	u32 doorbell[2];

	doorbell[0] = 0;
	doorbell[1] = 0;

	if (eq->type_flag == HNS_ROCE_AEQ) {
		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
			       HNS_ROCE_V2_EQ_DB_CMD_S,
			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			       HNS_ROCE_EQ_DB_CMD_AEQ :
			       HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
	} else {
		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
			       HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);

		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
			       HNS_ROCE_V2_EQ_DB_CMD_S,
			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			       HNS_ROCE_EQ_DB_CMD_CEQ :
			       HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
	}

	roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
		       HNS_ROCE_V2_EQ_DB_PARA_S,
		       (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));

	hns_roce_write64_k(doorbell, eq->doorbell);
}

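/*
 * Asynchronous event decoding: the handlers below extract the queue
 * number and sub event type from an AEQE and log a human-readable
 * description of the error before the event is forwarded to the core.
 */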
static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
					    struct hns_roce_aeqe *aeqe,
					    u32 qpn)
{
	struct device *dev = hr_dev->dev;
	int sub_type;

	dev_warn(dev, "Local work queue catastrophic error.\n");
	sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
				  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
	switch (sub_type) {
	case HNS_ROCE_LWQCE_QPC_ERROR:
		dev_warn(dev, "QP %d, QPC error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_MTU_ERROR:
		dev_warn(dev, "QP %d, MTU error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
		break;
	default:
		dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
		break;
	}
}

static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
						   struct hns_roce_aeqe *aeqe,
						   u32 qpn)
{
	struct device *dev = hr_dev->dev;
	int sub_type;

	dev_warn(dev, "Local access violation work queue error.\n");
	sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
				  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
	switch (sub_type) {
	case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
		dev_warn(dev, "QP %d, R_key violation.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_LENGTH_ERROR:
		dev_warn(dev, "QP %d, length error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_VA_ERROR:
		dev_warn(dev, "QP %d, VA error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_PD_ERROR:
		dev_err(dev, "QP %d, PD error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
		dev_warn(dev, "QP %d, rw acc error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
		dev_warn(dev, "QP %d, key state error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
		dev_warn(dev, "QP %d, MR operation error.\n", qpn);
		break;
	default:
		dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
		break;
	}
}
static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = hr_dev->dev;
	u32 qpn;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
			     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		dev_warn(dev, "Communication established.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		dev_warn(dev, "Send queue drained.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_warn(dev, "Invalid request local work queue error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, qpn, event_type);
}
static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = hr_dev->dev;
	u32 cqn;

	cqn = roce_get_field(aeqe->event.cq_event.cq,
			     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
			     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow.\n", cqn);
		break;
	default:
		break;
	}

	hns_roce_cq_event(hr_dev, cqn, event_type);
}
static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
{
	u32 buf_chk_sz;
	unsigned long off;

	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
	off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;

	return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
		off % buf_chk_sz);
}
static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
{
	u32 buf_chk_sz;
	unsigned long off;

	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
	off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;

	if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
		return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
			off % buf_chk_sz);
	else
		return (struct hns_roce_aeqe *)((u8 *)
			(eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
}
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe;

	if (!eq->hop_num)
		aeqe = get_aeqe_v2(eq, eq->cons_index);
	else
		aeqe = mhop_get_aeqe(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
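/*
 * Each AEQE carries an owner (polarity) bit.  Because the consumer
 * index is allowed to run from 0 to 2 * entries - 1 before wrapping,
 * "cons_index & entries" flips on every pass through the queue; the XOR
 * above therefore reports an entry as valid only while its owner bit
 * disagrees with the current pass polarity.
 */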
static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqe_found = 0;
	int event_type;

	while ((aeqe = next_aeqe_sw_v2(eq))) {

		/* Make sure we read AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_S);

		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "Path migration succeeded.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "Path migration failed.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			dev_warn(dev, "SRQ is not supported.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			dev_warn(dev, "DB overflow.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					le16_to_cpu(aeqe->event.cmd.token),
					aeqe->event.cmd.status,
					le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ overflow.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_FLR:
			dev_warn(dev, "Function level reset.\n");
			break;
		default:
			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				event_type, eq->eqn, eq->cons_index);
			break;
		}

		++eq->cons_index;
		aeqe_found = 1;

		if (eq->cons_index > (2 * eq->entries - 1)) {
			dev_warn(dev, "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	set_eq_cons_index_v2(eq);

	return aeqe_found;
}
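/*
 * The consumer index intentionally counts through two passes of the
 * queue (0 .. 2 * entries - 1) before being reset, which is what keeps
 * the polarity test in next_aeqe_sw_v2() alternating between passes.
 * The final set_eq_cons_index_v2() reports the new index back to
 * hardware and re-arms the EQ.
 */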
static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
{
	u32 buf_chk_sz;
	unsigned long off;

	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
	off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;

	return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
		off % buf_chk_sz);
}
static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
{
	u32 buf_chk_sz;
	unsigned long off;

	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
	off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;

	if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
		return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
			off % buf_chk_sz);
	else
		return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
			buf_chk_sz]) + off % buf_chk_sz);
}
static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;

	if (!eq->hop_num)
		ceqe = get_ceqe_v2(eq, eq->cons_index);
	else
		ceqe = mhop_get_ceqe(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ceqe *ceqe;
	int ceqe_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw_v2(eq))) {

		/* Make sure we read CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = roce_get_field(ceqe->comp,
				     HNS_ROCE_V2_CEQE_COMP_CQN_M,
				     HNS_ROCE_V2_CEQE_COMP_CQN_S);

		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqe_found = 1;

		if (eq->cons_index > (2 * eq->entries - 1)) {
			dev_warn(dev, "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	set_eq_cons_index_v2(eq);

	return ceqe_found;
}
static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	int int_work = 0;

	if (eq->type_flag == HNS_ROCE_CEQ)
		/* Completion event interrupt */
		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
	else
		/* Asynchronous event interrupt */
		int_work = hns_roce_v2_aeq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}
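/*
 * A single MSI-X handler serves both EQ types: the eq_ptr cookie passed
 * to request_irq() identifies the queue, and IRQ_RETVAL() turns the
 * "did we consume anything" flag into IRQ_HANDLED or IRQ_NONE.
 */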
static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = hr_dev->dev;
	int int_work = 0;
	u32 int_st;
	u32 int_en;

	/* Abnormal interrupt */
	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);

	if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
		dev_err(dev, "AEQ overflow!\n");

		roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
		dev_err(dev, "BUS ERR!\n");

		roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
		dev_err(dev, "OTHER ERR!\n");

		roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else {
		dev_err(dev, "There is no abnormal irq found!\n");
	}

	return IRQ_RETVAL(int_work);
}
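/*
 * For each abnormal condition the handler writes the matching status
 * bit back to ROCEE_VF_ABN_INT_ST_REG (presumably write-one-to-clear)
 * and rewrites the enable register so further abnormal interrupts
 * remain enabled.
 */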
static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
					int eq_num, int enable_flag)
{
	int i;

	if (enable_flag == EQ_ENABLE) {
		for (i = 0; i < eq_num; i++)
			roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
				   i * EQ_REG_OFFSET,
				   HNS_ROCE_V2_VF_EVENT_INT_EN_M);

		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
			   HNS_ROCE_V2_VF_ABN_INT_EN_M);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
			   HNS_ROCE_V2_VF_ABN_INT_CFG_M);
	} else {
		for (i = 0; i < eq_num; i++)
			roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
				   i * EQ_REG_OFFSET,
				   HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);

		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
			   HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
			   HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
	}
}
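/*
 * Every event queue has its own event-interrupt enable register,
 * addressed as ROCEE_VF_EVENT_INT_EN_REG plus a per-EQ stride
 * (EQ_REG_OFFSET); the abnormal-interrupt enable and config registers
 * are shared.  Writing the mask value enables the interrupt, writing 0
 * disables it.
 */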
static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (eqn < hr_dev->caps.num_comp_vectors)
		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
					0, HNS_ROCE_CMD_DESTROY_CEQC,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
	else
		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
					0, HNS_ROCE_CMD_DESTROY_AEQC,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
}
static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
				  struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	u64 idx;
	u64 size;
	u32 buf_chk_sz;
	u32 bt_chk_sz;
	u32 mhop_num;
	int eqe_alloc;
	int ba_num;
	int i = 0;
	int j = 0;

	mhop_num = hr_dev->caps.eqe_hop_num;
	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
	ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
		  buf_chk_sz;

	/* hop_num = 0 */
	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
		dma_free_coherent(dev, (unsigned int)(eq->entries *
				  eq->eqe_size), eq->bt_l0, eq->l0_dma);
		return;
	}

	/* hop_num = 1 or hop = 2 */
	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
	if (mhop_num == 1) {
		for (i = 0; i < eq->l0_last_num; i++) {
			if (i == eq->l0_last_num - 1) {
				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
				size = (eq->entries - eqe_alloc) * eq->eqe_size;
				dma_free_coherent(dev, size, eq->buf[i],
						  eq->buf_dma[i]);
				break;
			}
			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
					  eq->buf_dma[i]);
		}
	} else if (mhop_num == 2) {
		for (i = 0; i < eq->l0_last_num; i++) {
			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
					  eq->l1_dma[i]);

			for (j = 0; j < bt_chk_sz / 8; j++) {
				idx = i * (bt_chk_sz / 8) + j;
				if ((i == eq->l0_last_num - 1)
				     && j == eq->l1_last_num - 1) {
					eqe_alloc = (buf_chk_sz / eq->eqe_size)
						    * idx;
					size = (eq->entries - eqe_alloc)
						* eq->eqe_size;
					dma_free_coherent(dev, size,
							  eq->buf[idx],
							  eq->buf_dma[idx]);
					break;
				}
				dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
						  eq->buf_dma[idx]);
			}
		}
	}
	kfree(eq->buf_dma);
	kfree(eq->buf);
	kfree(eq->l1_dma);
	kfree(eq->bt_l1);
	eq->buf_dma = NULL;
	eq->buf = NULL;
	eq->l1_dma = NULL;
	eq->bt_l1 = NULL;
}
static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	u32 buf_chk_sz;

	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);

	if (hr_dev->caps.eqe_hop_num) {
		hns_roce_mhop_free_eq(hr_dev, eq);
		return;
	}

	if (eq->buf_list)
		dma_free_coherent(hr_dev->dev, buf_chk_sz,
				  eq->buf_list->buf, eq->buf_list->map);
}
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq,
				void *mb_buf)
{
	struct hns_roce_eq_context *eqc;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	/* init eqc */
	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->hop_num = hr_dev->caps.eqe_hop_num;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
	eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
	eq->shift = ilog2((unsigned int)eq->entries);

	if (!eq->hop_num)
		eq->eqe_ba = eq->buf_list->map;
	else
		eq->eqe_ba = eq->l0_dma;

	/* set eqc state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQ_ST_M,
		       HNS_ROCE_EQC_EQ_ST_S,
		       HNS_ROCE_V2_EQ_STATE_VALID);

	/* set eqe hop num */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_HOP_NUM_M,
		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);

	/* set eqc over_ignore */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_OVER_IGNORE_M,
		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);

	/* set eqc coalesce */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_COALESCE_M,
		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);

	/* set eqc arm_state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_ARM_ST_M,
		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);

	/* set eqn */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQN_M,
		       HNS_ROCE_EQC_EQN_S, eq->eqn);

	/* set eqe_cnt */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQE_CNT_M,
		       HNS_ROCE_EQC_EQE_CNT_S,
		       HNS_ROCE_EQ_INIT_EQE_CNT);

	/* set eqe_ba_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BA_PG_SZ_M,
		       HNS_ROCE_EQC_BA_PG_SZ_S,
		       eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);

	/* set eqe_buf_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BUF_PG_SZ_M,
		       HNS_ROCE_EQC_BUF_PG_SZ_S,
		       eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* set eq_producer_idx */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_PROD_INDX_M,
		       HNS_ROCE_EQC_PROD_INDX_S,
		       HNS_ROCE_EQ_INIT_PROD_IDX);

	/* set eq_max_cnt */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_MAX_CNT_M,
		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);

	/* set eq_period */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_PERIOD_M,
		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);

	/* set eqe_report_timer */
	roce_set_field(eqc->eqe_report_timer,
		       HNS_ROCE_EQC_REPORT_TIMER_M,
		       HNS_ROCE_EQC_REPORT_TIMER_S,
		       HNS_ROCE_EQ_INIT_REPORT_TIMER);

	/* set eqe_ba [34:3] */
	roce_set_field(eqc->eqe_ba0,
		       HNS_ROCE_EQC_EQE_BA_L_M,
		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);

	/* set eqe_ba [63:35] */
	roce_set_field(eqc->eqe_ba1,
		       HNS_ROCE_EQC_EQE_BA_H_M,
		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);

	/* set eq shift */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_SHIFT_M,
		       HNS_ROCE_EQC_SHIFT_S, eq->shift);

	/* set eq MSI_IDX */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_MSI_INDX_M,
		       HNS_ROCE_EQC_MSI_INDX_S,
		       HNS_ROCE_EQ_INIT_MSI_IDX);

	/* set cur_eqe_ba [27:12] */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);

	/* set cur_eqe_ba [59:28] */
	roce_set_field(eqc->byte_32,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);

	/* set cur_eqe_ba [63:60] */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);

	/* set eq consumer idx */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CONS_INDX_M,
		       HNS_ROCE_EQC_CONS_INDX_S,
		       HNS_ROCE_EQ_INIT_CONS_IDX);

	/* set nxt_eqe_ba [43:12] */
	roce_set_field(eqc->nxt_eqe_ba0,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);

	/* set nxt_eqe_ba [63:44] */
	roce_set_field(eqc->nxt_eqe_ba1,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}
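/*
 * The addresses programmed into the EQ context are split across several
 * fields, which is why the shifts above (>> 3, >> 35, >> 12, >> 28,
 * >> 44, >> 60) line up with the bit ranges named in the comments:
 * eqe_ba supplies the base of the queue (or of the L0 table when
 * multi-hop addressing is used), while cur_eqe_ba/nxt_eqe_ba seed the
 * hardware's current and next buffer pointers.
 */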
static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
				  struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	int eq_alloc_done = 0;
	int eq_buf_cnt = 0;
	int eqe_alloc = 0;
	u32 buf_chk_sz = 0;
	u32 bt_chk_sz = 0;
	u32 mhop_num;
	u64 size;
	u64 idx;
	int ba_num;
	int bt_num;
	int record_i;
	int record_j;
	int i = 0;
	int j = 0;

	mhop_num = hr_dev->caps.eqe_hop_num;
	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);

	ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
		  / buf_chk_sz;
	bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);

	/* hop_num = 0 */
	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
		if (eq->entries > buf_chk_sz / eq->eqe_size) {
			dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
				eq->entries);
			return -EINVAL;
		}
		eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
					       &(eq->l0_dma), GFP_KERNEL);
		if (!eq->bt_l0)
			return -ENOMEM;

		eq->cur_eqe_ba = eq->l0_dma;
		eq->nxt_eqe_ba = 0;

		memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);

		return 0;
	}

	eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
	if (!eq->buf_dma)
		return -ENOMEM;
	eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
	if (!eq->buf)
		goto err_kcalloc_buf;

	if (mhop_num == 2) {
		eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
		if (!eq->l1_dma)
			goto err_kcalloc_l1_dma;

		eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
		if (!eq->bt_l1)
			goto err_kcalloc_bt_l1;
	}

	/* alloc L0 BT */
	eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
	if (!eq->bt_l0)
		goto err_dma_alloc_l0;

	if (mhop_num == 1) {
		if (ba_num > (bt_chk_sz / 8))
			dev_err(dev, "ba_num %d is too large for 1 hop\n",
				ba_num);

		/* alloc buf */
		for (i = 0; i < bt_chk_sz / 8; i++) {
			if (eq_buf_cnt + 1 < ba_num) {
				size = buf_chk_sz;
			} else {
				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
				size = (eq->entries - eqe_alloc) * eq->eqe_size;
			}
			eq->buf[i] = dma_alloc_coherent(dev, size,
							&(eq->buf_dma[i]),
							GFP_KERNEL);
			if (!eq->buf[i])
				goto err_dma_alloc_buf;

			memset(eq->buf[i], 0, size);
			*(eq->bt_l0 + i) = eq->buf_dma[i];

			eq_buf_cnt++;
			if (eq_buf_cnt >= ba_num)
				break;
		}
		eq->cur_eqe_ba = eq->buf_dma[0];
		eq->nxt_eqe_ba = eq->buf_dma[1];

	} else if (mhop_num == 2) {
		/* alloc L1 BT and buf */
		for (i = 0; i < bt_chk_sz / 8; i++) {
			eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
							  &(eq->l1_dma[i]),
							  GFP_KERNEL);
			if (!eq->bt_l1[i])
				goto err_dma_alloc_l1;
			*(eq->bt_l0 + i) = eq->l1_dma[i];

			for (j = 0; j < bt_chk_sz / 8; j++) {
				idx = i * bt_chk_sz / 8 + j;
				if (eq_buf_cnt + 1 < ba_num) {
					size = buf_chk_sz;
				} else {
					eqe_alloc = (buf_chk_sz / eq->eqe_size)
						    * idx;
					size = (eq->entries - eqe_alloc)
						* eq->eqe_size;
				}
				eq->buf[idx] = dma_alloc_coherent(dev, size,
							&(eq->buf_dma[idx]),
							GFP_KERNEL);
				if (!eq->buf[idx])
					goto err_dma_alloc_buf;

				memset(eq->buf[idx], 0, size);
				*(eq->bt_l1[i] + j) = eq->buf_dma[idx];

				eq_buf_cnt++;
				if (eq_buf_cnt >= ba_num) {
					eq_alloc_done = 1;
					break;
				}
			}

			if (eq_alloc_done)
				break;
		}
		eq->cur_eqe_ba = eq->buf_dma[0];
		eq->nxt_eqe_ba = eq->buf_dma[1];
	}

	eq->l0_last_num = i + 1;
	if (mhop_num == 2)
		eq->l1_last_num = j + 1;

	return 0;

err_dma_alloc_l1:
	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
	eq->bt_l0 = NULL;
	eq->l0_dma = 0;
	for (i -= 1; i >= 0; i--) {
		dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
				  eq->l1_dma[i]);

		for (j = 0; j < bt_chk_sz / 8; j++) {
			idx = i * bt_chk_sz / 8 + j;
			dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
					  eq->buf_dma[idx]);
		}
	}
	goto err_dma_alloc_l0;

err_dma_alloc_buf:
	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
	eq->bt_l0 = NULL;
	eq->l0_dma = 0;

	if (mhop_num == 1)
		for (i -= 1; i >= 0; i--)
			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
					  eq->buf_dma[i]);
	else if (mhop_num == 2) {
		record_i = i;
		record_j = j;
		for (; i >= 0; i--) {
			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
					  eq->l1_dma[i]);

			for (j = 0; j < bt_chk_sz / 8; j++) {
				if (i == record_i && j >= record_j)
					break;

				idx = i * bt_chk_sz / 8 + j;
				dma_free_coherent(dev, buf_chk_sz,
						  eq->buf[idx],
						  eq->buf_dma[idx]);
			}
		}
	}

err_dma_alloc_l0:
	kfree(eq->bt_l1);
	eq->bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(eq->l1_dma);
	eq->l1_dma = NULL;

err_kcalloc_l1_dma:
	kfree(eq->buf);
	eq->buf = NULL;

err_kcalloc_buf:
	kfree(eq->buf_dma);
	eq->buf_dma = NULL;

	return -ENOMEM;
}
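/*
 * Sizing above: ba_num is the number of buffer pages needed to hold all
 * EQEs, and bt_num the number of base-address-table pages needed to
 * hold those ba_num addresses at 8 bytes each.  As an illustrative
 * example (numbers assumed, not driver defaults): with 4 KB pages,
 * 64-byte AEQEs and 4096 entries the queue needs 256 KB, so ba_num is
 * 64, and a single 4 KB BT page holds 512 addresses, so bt_num is 1.
 */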
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq,
				 unsigned int eq_cmd)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	u32 buf_chk_sz = 0;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (!hr_dev->caps.eqe_hop_num) {
		buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);

		eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
				       GFP_KERNEL);
		if (!eq->buf_list) {
			ret = -ENOMEM;
			goto free_cmd_mbox;
		}

		eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
						       &(eq->buf_list->map),
						       GFP_KERNEL);
		if (!eq->buf_list->buf) {
			ret = -ENOMEM;
			goto err_alloc_buf;
		}

		memset(eq->buf_list->buf, 0, buf_chk_sz);
	} else {
		ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
		if (ret) {
			ret = -ENOMEM;
			goto free_cmd_mbox;
		}
	}

	hns_roce_config_eqc(hr_dev, eq, mailbox->buf);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
				eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret) {
		dev_err(dev, "[mailbox cmd] create eqc failed.\n");
		goto err_cmd_mbox;
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_cmd_mbox:
	if (!hr_dev->caps.eqe_hop_num)
		dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
				  eq->buf_list->map);
	else {
		hns_roce_mhop_free_eq(hr_dev, eq);
		goto free_cmd_mbox;
	}

err_alloc_buf:
	kfree(eq->buf_list);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}
static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	unsigned int eq_cmd;
	int irq_num;
	int eq_num;
	int other_num;
	int comp_num;
	int aeq_num;
	int i, j, k;
	int ret;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_failed_kzalloc;
		}
	}

	/* create eq */
	for (j = 0; j < eq_num; j++) {
		eq = &eq_table->eq[j];
		eq->hr_dev = hr_dev;
		eq->eqn = j;
		if (j < comp_num) {
			/* CEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
			eq->irq = hr_dev->irq[j + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
			eq->irq = hr_dev->irq[j - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "eq create failed.\n");
			goto err_create_eq_fail;
		}
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	/* irq contains: abnormal + AEQ + CEQ */
	for (k = 0; k < irq_num; k++)
		if (k < other_num)
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
		else if (k < (other_num + aeq_num))
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
				 k - other_num);
		else
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
				 k - other_num - aeq_num);

	for (k = 0; k < irq_num; k++) {
		if (k < other_num)
			ret = request_irq(hr_dev->irq[k],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[k], hr_dev);
		else if (k < (other_num + comp_num))
			ret = request_irq(eq_table->eq[k - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[k + aeq_num],
					  &eq_table->eq[k - other_num]);
		else
			ret = request_irq(eq_table->eq[k - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[k - comp_num],
					  &eq_table->eq[k - other_num]);
		if (ret) {
			dev_err(dev, "Request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	return 0;

err_request_irq_fail:
	for (k -= 1; k >= 0; k--)
		if (k < other_num)
			free_irq(hr_dev->irq[k], hr_dev);
		else
			free_irq(eq_table->eq[k - other_num].irq,
				 &eq_table->eq[k - other_num]);

err_create_eq_fail:
	for (j -= 1; j >= 0; j--)
		hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);

err_failed_kzalloc:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);
	kfree(eq_table->eq);

	return ret;
}
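/*
 * Vector layout assumed by the table setup above: hr_dev->irq[] holds
 * the "other" (abnormal) vectors first, then the AEQ vectors, then the
 * CEQ vectors, while eq_table->eq[] stores CEQs first and AEQs last.
 * The irq_names indexing in the request_irq() branches compensates for
 * that difference so the "hns-aeq-*" and "hns-ceq-*" names end up on
 * the matching vectors.
 */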
static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	/* Disable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	for (i = 0; i < eq_num; i++) {
		hns_roce_v2_destroy_eqc(hr_dev, i);

		free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);

		hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
	}

	for (i = 0; i < irq_num; i++)
		kfree(hr_dev->irq_names[i]);

	kfree(eq_table->eq);
}
static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.post_mbox = hns_roce_v2_post_mbox,
	.chk_mbox = hns_roce_v2_chk_mbox,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.query_qp = hns_roce_v2_query_qp,
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.post_send = hns_roce_v2_post_send,
	.post_recv = hns_roce_v2_post_recv,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
};
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				  struct hnae3_handle *handle)
{
	const struct pci_device_id *id;
	int i;

	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
	if (!id) {
		dev_err(hr_dev->dev, "device is not compatible!\n");
		return -ENXIO;
	}

	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	return 0;
}
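/*
 * All of the configuration above comes from the hnae3 handle exported
 * by the HNS3 NIC driver: the RoCE register window, the netdev whose
 * MAC address seeds the node GUID, and the block of MSI-X vectors
 * starting at rinfo.base_vector, looked up with pci_irq_vector().
 */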
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hr_dev->pci_dev = handle->pdev;
	hr_dev->dev = &handle->pdev->dev;
	handle->priv = hr_dev;

	ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
	if (ret) {
		dev_err(hr_dev->dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_get_cfg;
	}

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;

	if (!hr_dev)
		return;

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
	struct ib_event event;

	if (!hr_dev) {
		dev_err(&handle->pdev->dev,
			"Input parameter handle->priv is NULL!\n");
		return -EINVAL;
	}

	hr_dev->active = false;
	hr_dev->is_reset = true;

	event.event = IB_EVENT_DEVICE_FATAL;
	event.device = &hr_dev->ib_dev;
	event.element.port_num = 1;
	ib_dispatch_event(&event);

	return 0;
}
static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	int ret;

	ret = hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
		 * engine is reinitialized in this callback. If the reinit
		 * fails, the NIC driver must be informed.
		 */
		handle->priv = NULL;
		dev_err(&handle->pdev->dev,
			"In reset process RoCE reinit failed %d.\n", ret);
	}

	return ret;
}
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	hns_roce_hw_v2_uninit_instance(handle, false);

	return 0;
}
static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_DOWN_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_down(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_init(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_uninit(handle);
		break;
	default:
		break;
	}

	return ret;
}
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};
static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");