/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  post_send/recv, poll_cq, req_notify
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm-powerpc/system.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
				  struct ehca_wqe *wqe_p,
				  struct ib_recv_wr *recv_wr)
{
	u8 cnt_ds;
	if (unlikely((recv_wr->num_sge < 0) ||
		     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			 "num_sge=%x max_nr_of_sg=%x",
			 recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = recv_wr->wr_id;
	wqe_p->nr_of_data_seg = recv_wr->num_sge;

	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
		wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
			recv_wr->sg_list[cnt_ds].addr;
		wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
			recv_wr->sg_list[cnt_ds].lkey;
		wqe_p->u.all_rcv.sg_list[cnt_ds].length =
			recv_wr->sg_list[cnt_ds].length;
	}

	if (ehca_debug_level) {
		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
			     ipz_rqueue);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
	}

	return 0;
}
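/*
 * Note on the dump length above (an inference from the arithmetic, not
 * a documented constant): 16 * (6 + nr_of_data_seg) treats the WQE as a
 * 96-byte header followed by one 16-byte scatter/gather entry per data
 * segment, matching the vaddr/lkey/length triples written by the loop.
 */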
#if defined(DEBUG_GSI_SEND_WR)

/* need ib_mad struct */
#include <rdma/ib_mad.h>

static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
{
	int idx = 0;
	int j;
	while (send_wr) {
		struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
		struct ib_sge *sge = send_wr->sg_list;
		ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
			     "send_flags=%x opcode=%x", idx, send_wr->wr_id,
			     send_wr->num_sge, send_wr->send_flags,
			     send_wr->opcode);
		if (mad_hdr) {
			ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
				     "mgmt_class=%x class_version=%x method=%x "
				     "status=%x class_specific=%x tid=%lx "
				     "attr_id=%x resv=%x attr_mod=%x",
				     idx, mad_hdr->base_version,
				     mad_hdr->mgmt_class,
				     mad_hdr->class_version, mad_hdr->method,
				     mad_hdr->status, mad_hdr->class_specific,
				     mad_hdr->tid, mad_hdr->attr_id,
				     mad_hdr->resv, mad_hdr->attr_mod);
		}
		for (j = 0; j < send_wr->num_sge; j++) {
			u8 *data = (u8 *)abs_to_virt(sge->addr);
			ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
				     "lkey=%x",
				     idx, j, data, sge->length, sge->lkey);
			/* assume length is n*16 */
			ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
				 idx, j);
			sge++;
		} /* eof for j */
		idx++;
		send_wr = send_wr->next;
	} /* eof while send_wr */
}

#endif /* DEBUG_GSI_SEND_WR */
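/*
 * Build note (assumed kbuild usage, not part of this file): the tracer
 * above is compiled in only when DEBUG_GSI_SEND_WR is defined, e.g. via
 *
 *	EXTRA_CFLAGS += -DDEBUG_GSI_SEND_WR
 *
 * in the Makefile; nothing in this file defines it, so the tracer is
 * off by default.
 */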
static inline int ehca_write_swqe(struct ehca_qp *qp,
				  struct ehca_wqe *wqe_p,
				  const struct ib_send_wr *send_wr)
{
	u32 idx;
	u64 dma_length;
	struct ehca_av *my_av;
	u32 remote_qkey = send_wr->wr.ud.remote_qkey;

	if (unlikely((send_wr->num_sge < 0) ||
		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			 "num_sge=%x max_nr_of_sg=%x",
			 send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = send_wr->wr_id;

	switch (send_wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_SEND;
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
		break;
	case IB_WR_RDMA_READ:
		wqe_p->optype = WQE_OPTYPE_RDMAREAD;
		break;
	default:
		ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
		return -EINVAL; /* invalid opcode */
	}

	wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;

	wqe_p->wr_flag = 0;

	if (send_wr->send_flags & IB_SEND_SIGNALED)
		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;

	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		/* this might not work as long as HW does not support it */
		wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
	}

	wqe_p->nr_of_data_seg = send_wr->num_sge;

	switch (qp->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* no break is intentional here */
	case IB_QPT_UD:
		/* IB 1.2 spec C10-15 compliance */
		if (send_wr->wr.ud.remote_qkey & 0x80000000)
			remote_qkey = qp->qkey;

		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
		wqe_p->local_ee_context_qkey = remote_qkey;
		if (!send_wr->wr.ud.ah) {
			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
			return -EINVAL;
		}
		my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
		wqe_p->u.ud_av.ud_av = my_av->av;

		/*
		 * omitted check of IB_SEND_INLINE
		 * since HW does not support it
		 */
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.ud_av.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.ud_av.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.ud_av.sg_list[idx].length =
				send_wr->sg_list[idx].length;
		} /* eof for idx */
		if (qp->qp_type == IB_QPT_SMI ||
		    qp->qp_type == IB_QPT_GSI)
			wqe_p->u.ud_av.ud_av.pmtu = 1;
		if (qp->qp_type == IB_QPT_GSI) {
			wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
			trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
		}
		break;

	case IB_QPT_UC:
		if (send_wr->send_flags & IB_SEND_FENCE)
			wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
		/* no break is intentional here */
	case IB_QPT_RC:
		/* TODO: atomic not implemented */
		wqe_p->u.nud.remote_virtual_adress =
			send_wr->wr.rdma.remote_addr;
		wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;

		/*
		 * omitted checking of IB_SEND_INLINE
		 * since HW does not support it
		 */
		dma_length = 0;
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.nud.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.nud.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.nud.sg_list[idx].length =
				send_wr->sg_list[idx].length;
			dma_length += send_wr->sg_list[idx].length;
		} /* eof idx */
		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
		break;

	default:
		ehca_gen_err("Invalid qptype=%x", qp->qp_type);
		return -EINVAL;
	}

	if (ehca_debug_level) {
		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
	}
	return 0;
}
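/*
 * Worked example for the C10-15 handling above: a UD work request
 * carrying wr.ud.remote_qkey = 0x80000000 has the high-order bit set,
 * so the WQE is built with the sender's own qp->qkey instead, as IB 1.2
 * C10-15 requires for Q_Keys with the most significant bit set.
 */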
/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
static inline void map_ib_wc_status(u32 cqe_status,
				    enum ib_wc_status *wc_status)
{
	if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
		switch (cqe_status & 0x3F) {
		case 0x01:
		case 0x21:
			*wc_status = IB_WC_LOC_LEN_ERR;
			break;
		case 0x02:
		case 0x22:
			*wc_status = IB_WC_LOC_QP_OP_ERR;
			break;
		case 0x03:
		case 0x23:
			*wc_status = IB_WC_LOC_EEC_OP_ERR;
			break;
		case 0x04:
		case 0x24:
			*wc_status = IB_WC_LOC_PROT_ERR;
			break;
		case 0x05:
		case 0x25:
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		case 0x06:
			*wc_status = IB_WC_MW_BIND_ERR;
			break;
		case 0x07: /* remote error - look into bits 20:24 */
			switch ((cqe_status
				 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
			case 0x0:
				/*
				 * PSN Sequence Error!
				 * couldn't find a matching status!
				 */
				*wc_status = IB_WC_GENERAL_ERR;
				break;
			case 0x1:
				*wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case 0x2:
				*wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case 0x3:
				*wc_status = IB_WC_REM_OP_ERR;
				break;
			case 0x4:
				*wc_status = IB_WC_REM_INV_RD_REQ_ERR;
				break;
			}
			break;
		case 0x08:
			*wc_status = IB_WC_RETRY_EXC_ERR;
			break;
		case 0x09:
			*wc_status = IB_WC_RNR_RETRY_EXC_ERR;
			break;
		case 0x0A:
		case 0x2D:
			*wc_status = IB_WC_REM_ABORT_ERR;
			break;
		case 0x0B:
		case 0x2E:
			*wc_status = IB_WC_INV_EECN_ERR;
			break;
		case 0x0C:
		case 0x2F:
			*wc_status = IB_WC_INV_EEC_STATE_ERR;
			break;
		case 0x0D:
			*wc_status = IB_WC_BAD_RESP_ERR;
			break;
		case 0x10:
			/* WQE purged */
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			*wc_status = IB_WC_FATAL_ERR;
		}
	} else
		*wc_status = IB_WC_SUCCESS;
}
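/*
 * Example (assuming WC_STATUS_ERROR_BIT is the high-order status bit):
 * a cqe_status of 0x80000005 has the error bit set and low six bits of
 * 0x05, so *wc_status becomes IB_WC_WR_FLUSH_ERR; any status with the
 * error bit clear maps to IB_WC_SUCCESS.
 */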
int ehca_post_send(struct ib_qp *qp,
		   struct ib_send_wr *send_wr,
		   struct ib_send_wr **bad_send_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ib_send_wr *cur_send_wr;
	struct ehca_wqe *wqe_p;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long spl_flags;

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);

	/* loop processes list of send reqs */
	for (cur_send_wr = send_wr; cur_send_wr != NULL;
	     cur_send_wr = cur_send_wr->next) {
		u64 start_offset = my_qp->ipz_squeue.current_q_offset;
		/* get pointer next to free WQE */
		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
		if (unlikely(!wqe_p)) {
			/* too many posted work requests: queue overflow */
			if (bad_send_wr)
				*bad_send_wr = cur_send_wr;
			if (wqe_cnt == 0) {
				ret = -ENOMEM;
				ehca_err(qp->device, "Too many posted WQEs "
					 "qp_num=%x", qp->qp_num);
			}
			goto post_send_exit0;
		}
		/* write a SEND WQE into the QUEUE */
		ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
		/*
		 * if something failed,
		 * reset the free entry pointer to the start value
		 */
		if (unlikely(ret)) {
			my_qp->ipz_squeue.current_q_offset = start_offset;
			*bad_send_wr = cur_send_wr;
			if (wqe_cnt == 0) {
				ret = -EINVAL;
				ehca_err(qp->device, "Could not write WQE "
					 "qp_num=%x", qp->qp_num);
			}
			goto post_send_exit0;
		}
		wqe_cnt++;
		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
			 my_qp, qp->qp_num, wqe_cnt);
	} /* eof for cur_send_wr */

post_send_exit0:
	/* UNLOCK the QUEUE */
	spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
	iosync(); /* serialize GAL register access */
	hipz_update_sqa(my_qp, wqe_cnt);
	return ret;
}
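/*
 * Caller sketch (illustration only; qp and wr are assumed to be set up
 * elsewhere): consumers reach this function through the generic verbs
 * entry point, e.g.
 *
 *	struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 *
 * On failure, bad_wr points at the first work request that was not
 * posted, per the verbs convention honoured by the loop above.
 */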
int ehca_post_recv(struct ib_qp *qp,
		   struct ib_recv_wr *recv_wr,
		   struct ib_recv_wr **bad_recv_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ib_recv_wr *cur_recv_wr;
	struct ehca_wqe *wqe_p;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long spl_flags;

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);

	/* loop processes list of recv reqs */
	for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
	     cur_recv_wr = cur_recv_wr->next) {
		u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
		/* get pointer next to free WQE */
		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
		if (unlikely(!wqe_p)) {
			/* too many posted work requests: queue overflow */
			if (bad_recv_wr)
				*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -ENOMEM;
				ehca_err(qp->device, "Too many posted WQEs "
					 "qp_num=%x", qp->qp_num);
			}
			goto post_recv_exit0;
		}
		/* write a RECV WQE into the QUEUE */
		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
		/*
		 * if something failed,
		 * reset the free entry pointer to the start value
		 */
		if (unlikely(ret)) {
			my_qp->ipz_rqueue.current_q_offset = start_offset;
			*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -EINVAL;
				ehca_err(qp->device, "Could not write WQE "
					 "qp_num=%x", qp->qp_num);
			}
			goto post_recv_exit0;
		}
		wqe_cnt++;
		ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
			     my_qp, qp->qp_num, wqe_cnt);
	} /* eof for cur_recv_wr */

post_recv_exit0:
	spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
	iosync(); /* serialize GAL register access */
	hipz_update_rqa(my_qp, wqe_cnt);
	return ret;
}
/*
 * ib_wc_opcode table converts ehca wc opcode to ib
 * Since we use zero to indicate invalid opcode, the actual ib opcode must
 * be decremented!!!
 */
static const u8 ib_wc_opcode[255] = {
	[0x01] = IB_WC_RECV+1,
	[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
	[0x04] = IB_WC_BIND_MW+1,
	[0x08] = IB_WC_FETCH_ADD+1,
	[0x10] = IB_WC_COMP_SWAP+1,
	[0x20] = IB_WC_RDMA_WRITE+1,
	[0x40] = IB_WC_RDMA_READ+1,
	[0x80] = IB_WC_SEND+1
};
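/*
 * Example of the decrement convention: ehca_poll_cq_one() below does
 *	wc->opcode = ib_wc_opcode[cqe->optype] - 1;
 * so an optype missing from this table reads as 0, yields -1, and is
 * rejected as an invalid opcode.
 */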
/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
	int ret = 0;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	struct ehca_cqe *cqe;
	int cqe_count = 0;

poll_cq_one_read_cqe:
	cqe = (struct ehca_cqe *)
		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
	if (!cqe) {
		ret = -EAGAIN;
		ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
			 "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret);
		goto poll_cq_one_exit0;
	}

	/* prevents loads being reordered across this point */
	rmb();

	cqe_count++;
	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
		struct ehca_qp *qp = ehca_cq_get_qp(my_cq,
						    cqe->local_qp_number);
		int purgeflag;
		unsigned long spl_flags;
		if (!qp) {
			ehca_err(cq->device, "cq_num=%x qp_num=%x "
				 "could not find qp -> ignore cqe",
				 my_cq->cq_number, cqe->local_qp_number);
			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
				 my_cq->cq_number, cqe->local_qp_number);
			/* ignore this purged cqe */
			goto poll_cq_one_read_cqe;
		}
		spin_lock_irqsave(&qp->spinlock_s, spl_flags);
		purgeflag = qp->sqerr_purgeflag;
		spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);

		if (purgeflag) {
			ehca_dbg(cq->device, "Got CQE with purged bit "
				 "qp_num=%x src_qp=%x",
				 cqe->local_qp_number, cqe->remote_qp_number);
			if (ehca_debug_level)
				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
					 cqe->local_qp_number,
					 cqe->remote_qp_number);
			/*
			 * ignore this to avoid double cqes of bad wqe
			 * that caused sqe and turn off purge flag
			 */
			qp->sqerr_purgeflag = 0;
			goto poll_cq_one_read_cqe;
		}
	}

	/* tracing cqe */
	if (ehca_debug_level) {
		ehca_dbg(cq->device,
			 "Received COMPLETION ehca_cq=%p cq_num=%x -----",
			 my_cq, my_cq->cq_number);
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x -------------------------",
			 my_cq, my_cq->cq_number);
	}

	/* we got a completion! */
	wc->wr_id = cqe->work_request_id;

	/* eval ib_wc_opcode */
	wc->opcode = ib_wc_opcode[cqe->optype]-1;
	if (unlikely(wc->opcode == -1)) {
		ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
			 "ehca_cq=%p cq_num=%x",
			 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
		/* dump cqe for other infos */
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		/* update also queue adder to throw away this entry!!! */
		goto poll_cq_one_exit0;
	}

	/* eval ib_wc_status */
	if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
		/* complete with errors */
		map_ib_wc_status(cqe->status, &wc->status);
		wc->vendor_err = wc->status;
	} else
		wc->status = IB_WC_SUCCESS;

	wc->byte_len = cqe->nr_bytes_transferred;
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->rlid;
	wc->dlid_path_bits = cqe->dlid;
	wc->src_qp = cqe->remote_qp_number;
	wc->wc_flags = cqe->w_completion_flags;
	wc->imm_data = cpu_to_be32(cqe->immediate_data);
	wc->sl = cqe->service_level;

	if (wc->status != IB_WC_SUCCESS)
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
			 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
			 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
			 cqe->status, cqe->local_qp_number,
			 cqe->remote_qp_number, cqe->work_request_id, cqe);

poll_cq_one_exit0:
	if (cqe_count > 0)
		hipz_update_feca(my_cq, cqe_count);

	return ret;
}
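/*
 * Note (an assumption from the hipz naming, not documented here):
 * hipz_update_feca() appears to add cqe_count to the CQ's free entry
 * count adder, handing the consumed CQEs back to the hardware, which is
 * why it is only called when at least one CQE was read above.
 */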
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int nr;
	struct ib_wc *current_wc = wc;
	int ret = 0;
	unsigned long spl_flags;

	if (num_entries < 1) {
		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
			 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
		ret = -EINVAL;
		goto poll_cq_exit0;
	}

	spin_lock_irqsave(&my_cq->spinlock, spl_flags);
	for (nr = 0; nr < num_entries; nr++) {
		ret = ehca_poll_cq_one(cq, current_wc);
		if (ret)
			break;
		current_wc++;
	} /* eof for nr */
	spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
	if (ret == -EAGAIN || !ret)
		ret = nr;

poll_cq_exit0:
	return ret;
}
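/*
 * Usage sketch (illustration only; handle_completion is hypothetical):
 * since the return value is the number of completions copied into wc,
 * a consumer typically drains the CQ through the generic verbs wrapper:
 *
 *	struct ib_wc wc;
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle_completion(&wc);
 */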
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	unsigned long spl_flags;
	int ret = 0;

	switch (notify_flags & IB_CQ_SOLICITED_MASK) {
	case IB_CQ_SOLICITED:
		hipz_set_cqx_n0(my_cq, 1);
		break;
	case IB_CQ_NEXT_COMP:
		hipz_set_cqx_n1(my_cq, 1);
		break;
	default:
		return -EINVAL;
	}

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		spin_lock_irqsave(&my_cq->spinlock, spl_flags);
		ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
		spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
	}

	return ret;
}