/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
        ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_cq *ibcq;

        if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
                printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
                       "on CQ %06x\n", type, cq->cqn);
                return;
        }

        ibcq = &to_mibcq(cq)->ibcq;
        if (ibcq->event_handler) {
                event.device     = ibcq->device;
                event.event      = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
        return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}
static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
        return get_cqe_from_buf(&cq->buf, n);
}
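/*
 * The hardware toggles the CQE ownership bit on each pass through the
 * (power-of-two sized) CQ buffer.  A CQE at index n belongs to software
 * only when its ownership bit matches the parity of the current pass,
 * i.e. the bit of n just above the index mask; otherwise get_sw_cqe()
 * returns NULL to indicate that hardware has not completed it yet.
 */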
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
        struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

        return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
                !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct mlx4_ib_cq *mcq = to_mcq(cq);
        struct mlx4_ib_dev *dev = to_mdev(cq->device);

        return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
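/*
 * Allocate a kernel-owned CQ buffer of nent CQEs and describe it to the
 * HCA: reserve an MTT range for its pages and write the page addresses
 * into it.  On failure the MTT and buffer are unwound in reverse order.
 */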
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
        int err;

        err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
                             PAGE_SIZE * 2, &buf->buf);
        if (err)
                goto out;

        err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
                            &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
                      &buf->buf);

out:
        return err;
}
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
        mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}
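/*
 * For a userspace CQ the buffer lives in the caller's address space:
 * pin it with ib_umem_get() and build an MTT from the pinned pages so
 * the HCA can DMA completions directly into the user buffer.
 */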
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
                               struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
                               u64 buf_addr, int cqe)
{
        int err;

        *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
                            ilog2((*umem)->page_size), &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        ib_umem_release(*umem);

        return err;
}
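/*
 * CQ creation has two flavors.  For userspace CQs the consumer supplies
 * the CQ buffer and doorbell record (passed in through udata) and we only
 * register them with the HCA; for kernel CQs the driver allocates both
 * itself.  Either way the CQ is hooked up to the completion and async
 * event handlers defined above.
 */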
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
                                struct ib_ucontext *context,
                                struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_cq *cq;
        struct mlx4_uar *uar;
        int err;

        if (entries < 1 || entries > dev->dev->caps.max_cqes)
                return ERR_PTR(-EINVAL);

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        entries      = roundup_pow_of_two(entries + 1);
        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;

        if (context) {
                struct mlx4_ib_create_cq ucmd;

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_cq;
                }

                err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
                                          ucmd.buf_addr, entries);
                if (err)
                        goto err_cq;

                err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
                                          &cq->db);
                if (err)
                        goto err_mtt;

                uar = &to_mucontext(context)->uar;
        } else {
                err = mlx4_db_alloc(dev->dev, &cq->db, 1);
                if (err)
                        goto err_cq;

                cq->mcq.set_ci_db  = cq->db.db;
                cq->mcq.arm_db     = cq->db.db + 1;
                *cq->mcq.set_ci_db = 0;
                *cq->mcq.arm_db    = 0;

                err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
                if (err)
                        goto err_db;

                uar = &dev->priv_uar;
        }

        err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
                            cq->db.dma, &cq->mcq, vector, 0);
        if (err)
                goto err_dbmap;

        cq->mcq.comp  = mlx4_ib_cq_comp;
        cq->mcq.event = mlx4_ib_cq_event;

        if (context)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_dbmap;
                }

        return &cq->ibcq;

err_dbmap:
        if (context)
                mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

        if (context)
                ib_umem_release(cq->umem);
        else
                mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
        if (!context)
                mlx4_db_free(dev->dev, &cq->db);

err_cq:
        kfree(cq);

        return ERR_PTR(err);
}
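/*
 * Resize helpers: allocate the replacement CQ buffer that completions
 * will be copied into.  mlx4_alloc_resize_buf() handles kernel CQs,
 * while mlx4_alloc_resize_umem() pins a new userspace buffer instead.
 */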
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                 int entries)
{
        int err;

        if (cq->resize_buf)
                return -EBUSY;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                  int entries, struct ib_udata *udata)
{
        struct mlx4_ib_resize_cq ucmd;
        int err;

        if (cq->resize_umem)
                return -EBUSY;

        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return -EFAULT;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
                                  &cq->resize_umem, ucmd.buf_addr, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
        u32 i;

        i = cq->mcq.cons_index;
        while (get_sw_cqe(cq, i & cq->ibcq.cqe))
                ++i;

        return i - cq->mcq.cons_index;
}
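/*
 * Copy any CQEs that were completed in the old buffer but not yet polled
 * into the new (resized) buffer, stopping at the special RESIZE CQE.
 * The ownership bit of each copied CQE is recomputed for the new buffer
 * size so that get_sw_cqe() keeps working after the switch-over.
 */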
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
        struct mlx4_cqe *cqe, *new_cqe;
        int i;

        i = cq->mcq.cons_index;
        cqe = get_cqe(cq, i & cq->ibcq.cqe);
        while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
                new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
                                           (i + 1) & cq->resize_buf->cqe);
                memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
                new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
                        (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
                cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
        }
        ++cq->mcq.cons_index;
}
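/*
 * Resize flow: allocate the new buffer (kernel or user), then ask the
 * HCA to switch to it with mlx4_cq_resize().  For user CQs the old
 * buffer can be released immediately; for kernel CQs the switch-over is
 * completed under cq->lock, either here or when the RESIZE CQE is seen
 * by mlx4_ib_poll_one().  cq->resize_mutex serializes concurrent resizes.
 */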
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_mtt mtt;
        int outst_cqe;
        int err;

        mutex_lock(&cq->resize_mutex);

        if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                err = 0;
                goto out;
        }

        if (ibcq->uobject) {
                err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
                if (err)
                        goto out;
        } else {
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
                        err = 0;
                        goto out;
                }

                err = mlx4_alloc_resize_buf(dev, cq, entries);
                if (err)
                        goto out;
        }

        mtt = cq->buf.mtt;

        err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
        if (err)
                goto err_buf;

        mlx4_mtt_cleanup(dev->dev, &mtt);
        if (ibcq->uobject) {
                cq->buf      = cq->resize_buf->buf;
                cq->ibcq.cqe = cq->resize_buf->cqe;
                ib_umem_release(cq->umem);
                cq->umem     = cq->resize_umem;

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                cq->resize_umem = NULL;
        } else {
                spin_lock_irq(&cq->lock);
                if (cq->resize_buf) {
                        mlx4_ib_cq_resize_copy_cqes(cq);
                        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }
                spin_unlock_irq(&cq->lock);
        }

        goto out;

err_buf:
        mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
        if (!ibcq->uobject)
                mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
                                    cq->resize_buf->cqe);

        kfree(cq->resize_buf);
        cq->resize_buf = NULL;

        if (cq->resize_umem) {
                ib_umem_release(cq->resize_umem);
                cq->resize_umem = NULL;
        }

out:
        mutex_unlock(&cq->resize_mutex);
        return err;
}
int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
        struct mlx4_ib_dev *dev = to_mdev(cq->device);
        struct mlx4_ib_cq *mcq = to_mcq(cq);

        mlx4_cq_free(dev->dev, &mcq->mcq);
        mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

        if (cq->uobject) {
                mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
                ib_umem_release(mcq->umem);
        } else {
                mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
                mlx4_db_free(dev->dev, &mcq->db);
        }

        kfree(mcq);

        return 0;
}
static void dump_cqe(void *cqe)
{
        __be32 *buf = cqe;

        printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
               be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
               be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
               be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}
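/*
 * Translate the hardware error syndrome carried in an error CQE into
 * the corresponding ib_wc status code, and stash the vendor syndrome
 * for debugging.  Local QP operation errors also get the raw CQE dumped.
 */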
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
                                     struct ib_wc *wc)
{
        if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
                printk(KERN_DEBUG "local QP operation err "
                       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
                       "opcode = %02x)\n",
                       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
                       cqe->vendor_err_syndrome,
                       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                dump_cqe(cqe);
        }

        switch (cqe->syndrome) {
        case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX4_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_syndrome;
}
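/*
 * A received packet's checksum may be treated as verified only if the
 * CQE status flags show a plain IPv4 packet (no fragments, no IP options,
 * not IPv6) with a good IP checksum, the packet is TCP or UDP, and the
 * 16-bit checksum field is 0xffff.
 */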
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
        return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4      |
                                      MLX4_CQE_STATUS_IPV4F     |
                                      MLX4_CQE_STATUS_IPV4OPT   |
                                      MLX4_CQE_STATUS_IPV6      |
                                      MLX4_CQE_STATUS_IPOK)) ==
                cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
                            MLX4_CQE_STATUS_IPOK)) &&
                (status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
                                      MLX4_CQE_STATUS_TCP)) &&
                checksum == cpu_to_be16(0xffff);
}
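/*
 * Poll a single completion: advance the consumer index, handle the NOP
 * and RESIZE special cases, look up the QP the CQE belongs to (cached in
 * *cur_qp across calls), recover the wr_id from the send queue, receive
 * queue or SRQ, and then fill in the rest of the work completion from
 * the CQE contents.  Returns -EAGAIN when the CQ is empty.
 */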
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                            struct mlx4_ib_qp **cur_qp,
                            struct ib_wc *wc)
{
        struct mlx4_cqe *cqe;
        struct mlx4_qp *mqp;
        struct mlx4_ib_wq *wq;
        struct mlx4_ib_srq *srq;
        int is_send;
        int is_error;
        u32 g_mlpath_rqpn;
        u16 wqe_ctr;

repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        ++cq->mcq.cons_index;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;

        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
                     is_send)) {
                printk(KERN_WARNING "Completion for NOP opcode detected!\n");
                return -EINVAL;
        }

        /* Resize CQ in progress */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
                if (cq->resize_buf) {
                        struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

                        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }

                goto repoll;
        }

        if (!*cur_qp ||
            (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                       be32_to_cpu(cqe->vlan_my_qpn));
                if (unlikely(!mqp)) {
                        printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
                               cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
                        return -EINVAL;
                }

                *cur_qp = to_mibqp(mqp);
        }

        wc->qp = &(*cur_qp)->ibqp;

        if (is_send) {
                wq = &(*cur_qp)->sq;
                if (!(*cur_qp)->sq_signal_bits) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_index);
                        wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
                }
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        } else if ((*cur_qp)->ibqp.srq) {
                srq = to_msrq((*cur_qp)->ibqp.srq);
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else {
                wq = &(*cur_qp)->rq;
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        }

        if (unlikely(is_error)) {
                mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
                return 0;
        }

        wc->status = IB_WC_SUCCESS;

        if (is_send) {
                wc->wc_flags = 0;
                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_OPCODE_RDMA_WRITE_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                case MLX4_OPCODE_RDMA_WRITE:
                        wc->opcode   = IB_WC_RDMA_WRITE;
                        break;
                case MLX4_OPCODE_SEND_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                case MLX4_OPCODE_SEND:
                case MLX4_OPCODE_SEND_INVAL:
                        wc->opcode   = IB_WC_SEND;
                        break;
                case MLX4_OPCODE_RDMA_READ:
                        wc->opcode   = IB_WC_RDMA_READ;
                        wc->byte_len = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MLX4_OPCODE_ATOMIC_CS:
                        wc->opcode   = IB_WC_COMP_SWAP;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_ATOMIC_FA:
                        wc->opcode   = IB_WC_FETCH_ADD;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_BIND_MW:
                        wc->opcode   = IB_WC_BIND_MW;
                        break;
                case MLX4_OPCODE_LSO:
                        wc->opcode   = IB_WC_LSO;
                        break;
                case MLX4_OPCODE_FMR:
                        wc->opcode   = IB_WC_FAST_REG_MR;
                        break;
                case MLX4_OPCODE_LOCAL_INVAL:
                        wc->opcode   = IB_WC_LOCAL_INV;
                        break;
                }
        } else {
                wc->byte_len = be32_to_cpu(cqe->byte_cnt);

                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
                        wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
                        wc->wc_flags    = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                case MLX4_RECV_OPCODE_SEND_INVAL:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = IB_WC_WITH_INVALIDATE;
                        wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
                        break;
                case MLX4_RECV_OPCODE_SEND:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = 0;
                        break;
                case MLX4_RECV_OPCODE_SEND_IMM:
                        wc->opcode      = IB_WC_RECV;
                        wc->wc_flags    = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                }

                wc->slid           = be16_to_cpu(cqe->rlid);
                wc->sl             = be16_to_cpu(cqe->sl_vid) >> 12;
                g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
                wc->src_qp         = g_mlpath_rqpn & 0xffffff;
                wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
                wc->wc_flags      |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
                wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
                wc->csum_ok        = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
        }

        return 0;
}
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
        int err = 0;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
                if (err)
                        break;
        }

        if (npolled)
                mlx4_cq_set_ci(&cq->mcq);

        spin_unlock_irqrestore(&cq->lock, flags);

        if (err == 0 || err == -EAGAIN)
                return npolled;
        else
                return err;
}
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        mlx4_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
                    to_mdev(ibcq->device)->uar_map,
                    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

        return 0;
}
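/*
 * Remove all CQEs belonging to the QP with the given QPN, compacting the
 * remaining entries; used when a QP is being torn down while completions
 * for it are still queued.  The caller must hold cq->lock -- the locked
 * wrapper below is the normal entry point.
 */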
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        u32 prod_index;
        int nfreed = 0;
        struct mlx4_cqe *cqe, *dest;
        u8 owner_bit;

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
                        if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
                        memcpy(dest, cqe, sizeof *cqe);
                        dest->owner_sr_opcode = owner_bit |
                                (dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                }
        }

        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /*
                 * Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx4_cq_set_ci(&cq->mcq);
        }
}
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        spin_lock_irq(&cq->lock);
        __mlx4_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}