/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */
#include <linux/init.h>
#include <linux/hardirq.h>

#include <rdma/ib_pack.h>	/* IB_OPCODE_* values used in mthca_poll_one() */

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};
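/*
 * Sizing note (illustrative, assuming 4 KB pages): CQEs are 0x20 =
 * 32 bytes, so the 4 * PAGE_SIZE direct-buffer limit above covers
 * CQs of up to 512 entries; anything larger takes the page_list
 * path in mthca_alloc_cq_buf() below.
 */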
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	u32 flags;
	u64 start;
	u32 logsize_usrpage;
	u32 error_eqn;		/* Tavor only */
	u32 comp_eqn;
	u32 pd;
	u32 lkey;
	u32 last_notified_index;
	u32 solicit_producer_index;
	u32 consumer_index;
	u32 producer_index;
	u32 cqn;
	u32 ci_db;		/* Arbel only */
	u32 state_db;		/* Arbel only */
	u32 reserved;
} __attribute__((packed));
#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)
enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};
enum {
	SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
	SYNDROME_LOCAL_PROT_ERR          = 0x04,
	SYNDROME_WR_FLUSH_ERR            = 0x05,
	SYNDROME_MW_BIND_ERR             = 0x06,
	SYNDROME_BAD_RESP_ERR            = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
	SYNDROME_REMOTE_OP_ERR           = 0x14,
	SYNDROME_RETRY_EXC_ERR           = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
	SYNDROME_INVAL_EECN_ERR          = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};
struct mthca_cqe {
	u32 my_qpn;
	u32 my_ee;
	u32 rqpn;
	u16 sl_g_mlpath;
	u16 rlid;
	u32 imm_etype_pkey_eec;
	u32 byte_cnt;
	u32 wqe;
	u8  opcode;
	u8  is_send;
	u8  reserved;
	u8  owner;
};
struct mthca_err_cqe {
	u32 my_qpn;
	u32 reserved1[3];
	u8  syndrome;
	u8  reserved2;
	u16 db_cnt;
	u32 reserved3;
	u32 wqe;
	u8  opcode;
	u8  reserved4[2];
	u8  owner;
};
#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	if (cq->is_direct)
		return cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}
static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
{
	struct mthca_cqe *cqe = get_cqe(cq, i);
	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}
static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
}
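/*
 * Note on the masking above: mthca_init_cq() sets cq->ibcq.cqe to
 * nent - 1 for a power-of-two nent, so it doubles as an index mask
 * and the monotonically increasing cons_index wraps onto the ring
 * without a separate modulo.
 */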
static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}
static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
	__be32 *cqe = cqe_ptr;

	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
	mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}
/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	u32 doorbell[2];

	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
		doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
		doorbell[1] = cpu_to_be32(incr - 1);

		mthca_write64(doorbell,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}
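/*
 * Call pattern (as in mthca_poll_cq() and mthca_cq_clean() below):
 * the consumer index is advanced and made visible before the
 * doorbell is rung, e.g.
 *
 *	wmb();
 *	cq->cons_index += freed;
 *	update_cons_index(dev, cq, freed);
 */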
void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
{
	struct mthca_cq *cq;
	struct mthca_cqe *cqe;
	int prod_index;
	int nfreed = 0;

	spin_lock_irq(&dev->cq_table.lock);
	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);
	spin_unlock_irq(&dev->cq_table.lock);

	if (!cq)
		return;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(cq, prod_index & cq->ibcq.cqe);
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	if (0)
		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
			  qpn, cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while (prod_index > cq->cons_index) {
		cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn))
			++nfreed;
		else if (nfreed)
			memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
				       cq->ibcq.cqe),
			       cqe,
			       MTHCA_CQ_ENTRY_SIZE);
		--prod_index;
	}

	if (nfreed) {
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}
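/*
 * Worked example of the sweep in mthca_cq_clean() (hypothetical ring
 * contents): if the entries between cons_index and prod_index are
 * [A, B, C, B, D] with the B entries belonging to the QP being
 * cleaned, the copies slide A and C upward so the survivors sit as
 * [., ., A, C, D], and cons_index then advances past the two vacated
 * slots at the bottom.
 */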
static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			    struct mthca_qp *qp, int wqe_index, int is_send,
			    struct mthca_err_cqe *cqe,
			    struct ib_wc *entry, int *free_cqe)
{
	int err;
	int dbd;
	u32 new_wqe;

	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
		mthca_dbg(dev, "local QP operation err "
			  "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
			  cq->cqn, cq->cons_index);
		dump_cqe(dev, cqe);
	}

	/*
	 * For completions in error, only work request ID, status (and
	 * freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
	if (err)
		return err;

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 *
	 * This does not apply to mem-free HCAs: they don't use the
	 * doorbell count field, and so we should always free the CQE.
	 */
	if (mthca_is_memfree(dev) ||
	    !(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return 0;

	cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;

	return 0;
}
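/*
 * The tail of handle_error_cqe() is what lets a single hardware
 * error CQE stand in for a chain of doorbell-linked WQEs on Tavor:
 * rather than being freed, the CQE is rewritten as a WR_FLUSH_ERR
 * completion pointing at the next WQE, so the same slot is consumed
 * once per flushed work request until the doorbell count runs out.
 */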
static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	if (0) {
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}

	entry->qp_num = (*cur_qp)->qpn;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else {
		wq = &(*cur_qp)->rq;
		wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq->last_comp < wqe_index)
		wq->tail += wqe_index - wq->last_comp;
	else
		wq->tail += wqe_index + wq->max - wq->last_comp;

	wq->last_comp = wqe_index;

	if (0)
		mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
			  is_send ? "Send" : "Receive",
			  (*cur_qp)->qpn, wqe_index, wq->max);

	if (is_error) {
		err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				       (struct mthca_err_cqe *) cqe,
				       entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode    = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode    = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode    = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode    = IB_WC_RDMA_READ;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode    = IB_WC_COMP_SWAP;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode    = IB_WC_FETCH_ADD;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode    = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode    = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid           = be16_to_cpu(cqe->rlid);
		entry->sl             = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
		entry->src_qp         = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
		entry->pkey_index     = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags      |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
					IB_WC_GRH : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}
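/*
 * Note that the consumer index only advances when a CQE is handed
 * back to hardware; error CQEs recycled by handle_error_cqe()
 * (free_cqe == 0) stay software-owned and yield further flush
 * completions on subsequent calls.
 */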
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}
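/*
 * Usage sketch (illustrative, not from this driver): a kernel
 * consumer reaches this entry point through the verbs layer, e.g.
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	n = ib_poll_cq(cq, 8, wc);		// dispatches to mthca_poll_cq()
 *	for (i = 0; i < n; ++i)
 *		if (wc[i].status != IB_WC_SUCCESS)
 *			handle_bad_wc(&wc[i]);	// hypothetical helper
 *
 * A non-negative return is the number of completions reaped;
 * mthca_poll_one()'s -EAGAIN ("ring empty") is absorbed here rather
 * than reported to the caller.
 */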
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
{
	u32 doorbell[2];

	doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
				   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
				   MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
				  to_mcq(cq)->cqn);
	doorbell[1] = 0xffffffff;

	mthca_write64(doorbell,
		      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}
int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	u32 doorbell[2];
	u32 sn;
	u32 ci;

	sn = cq->arm_sn & 3;
	ci = cpu_to_be32(cq->cons_index);

	doorbell[0] = ci;
	doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				  (notify == IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(doorbell, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32((sn << 28)                       |
				  (notify == IB_CQ_SOLICITED ?
				   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
				   MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
				  cq->cqn);
	doorbell[1] = ci;

	mthca_write64(doorbell,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}
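/*
 * A plausible reading of the sn handling above (the sequence-number
 * scheme is only sketched in this file): mthca_cq_event() bumps
 * cq->arm_sn on each completion event, so an arm request carrying a
 * stale sequence number can be recognized as outdated by the HCA
 * instead of re-arming the CQ behind the driver's back.
 */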
static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int i;
	int size;

	if (cq->is_direct)
		dma_free_coherent(&dev->pdev->dev,
				  (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
				  cq->queue.direct.buf,
				  pci_unmap_addr(&cq->queue.direct,
						 mapping));
	else {
		size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
			if (cq->queue.page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  cq->queue.page_list[i].buf,
						  pci_unmap_addr(&cq->queue.page_list[i],
								 mapping));

		kfree(cq->queue.page_list);
	}
}
static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
			      struct mthca_cq *cq)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
		cq->is_direct = 1;
		npages        = 1;
		shift         = get_order(size) + PAGE_SHIFT;

		cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
							  size, &t, GFP_KERNEL);
		if (!cq->queue.direct.buf)
			return -ENOMEM;

		pci_unmap_addr_set(&cq->queue.direct, mapping, t);

		memset(cq->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		cq->is_direct = 0;
		npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift         = PAGE_SHIFT;

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
					      GFP_KERNEL);
		if (!cq->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			cq->queue.page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			cq->queue.page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!cq->queue.page_list[i].buf)
				goto err_free;

			pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);

			memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
		}
	}

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &cq->mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	mthca_free_cq_buf(dev, cq);

err_out:
	kfree(dma_list);

	return err;
}
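/*
 * Worked example for the alignment loop in the direct case, assuming
 * 4 KB pages: with size = 4 * PAGE_SIZE, shift starts at
 * get_order(16K) + PAGE_SHIFT = 14.  If the DMA address t is only
 * 4 KB aligned, the loop drops shift to 12 and doubles npages to 4,
 * so the MR is registered as four naturally aligned 4 KB chunks.
 */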
int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq)
{
	int size = nent * MTHCA_CQ_ENTRY_SIZE;
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
	u8 status;
	int i;

	might_sleep();

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		if (cq->is_kernel) {
			cq->arm_sn = 1;

			err = -ENOMEM;

			cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
							     cq->cqn, &cq->set_ci_db);
			if (cq->set_ci_db_index < 0)
				goto err_out_icm;

			cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
							  cq->cqn, &cq->arm_db);
			if (cq->arm_db_index < 0)
				goto err_out_ci;
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_arm;

	cq_context = mailbox->buf;

	if (cq->is_kernel) {
		err = mthca_alloc_cq_buf(dev, size, cq);
		if (err)
			goto err_out_mailbox;

		for (i = 0; i < nent; ++i)
			set_cqe_hw(get_cqe(cq, i));
	}

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->start           = cpu_to_be64(0);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel) {
		mthca_free_mr(dev, &cq->mr);
		mthca_free_cq_buf(dev, cq);
	}

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}
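/*
 * On the logsize encoding above: nent is a power of two, so
 * ffs(nent) - 1 is log2(nent); e.g. a 256-entry CQ puts 8 in the
 * top byte of logsize_usrpage, with the UAR index in the low bits.
 */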
void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	might_sleep();

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

	if (0) {
		u32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	spin_unlock_irq(&dev->cq_table.lock);

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	atomic_dec(&cq->refcount);
	wait_event(cq->wait, !atomic_read(&cq->refcount));

	if (cq->is_kernel) {
		mthca_free_mr(dev, &cq->mr);
		mthca_free_cq_buf(dev, cq);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}
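/*
 * The teardown ordering above matters: the CQ is first removed from
 * cq_table so no new reference can be taken, the completion EQ's
 * interrupt is synchronized so no handler is still running on it,
 * and only after the refcount drains (mthca_cq_clean() takes its
 * reference under cq_table.lock) are the buffer and doorbell records
 * freed.
 */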
int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}
void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}