/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};
enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};
enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};
enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,

	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,

	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;		/* Next send WQE on Tavor */
	__be32 snd_db_index;		/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;		/* Next recv WQE on Tavor */
	__be32 rcv_db_index;		/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;		/* reserved on Tavor */
	__be16 sq_wqe_counter;		/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));
struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
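
/*
 * Annotation (not part of the original file): per the special-QP setup in
 * mthca_init_qp_table() and mthca_alloc_sqp() below, qp_table.sqp_start is
 * the first of four reserved QPNs: sqp_start + 0/1 are the SMI (QP0) QPs
 * for ports 1/2 and sqp_start + 2/3 are the GSI (QP1) QPs, so is_sqp()
 * matches all four special QPs while is_qp0() matches only the two SMI QPs.
 */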
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}
static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}
static const struct {
	int trans;
	u32 req_param[NUM_TRANS];
	u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_RST2INIT,
			.req_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			},
			/* bug-for-bug compatibility with VAPI: */
			.opt_param = {
				[MLX] = IB_QP_PORT
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_INIT2INIT,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.trans = MTHCA_TRANS_INIT2RTR,
			.req_param = {
				[UC]  = (IB_QP_AV | IB_QP_PATH_MTU |
					 IB_QP_DEST_QPN | IB_QP_RQ_PSN),
				[RC]  = (IB_QP_AV | IB_QP_PATH_MTU |
					 IB_QP_DEST_QPN | IB_QP_RQ_PSN |
					 IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[UC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[RC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTR2RTS,
			.req_param = {
				[UD]  = IB_QP_SQ_PSN,
				[UC]  = IB_QP_SQ_PSN,
				[RC]  = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
					 IB_QP_MAX_QP_RD_ATOMIC),
				[MLX] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTS2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH |
					 IB_QP_PATH_MIG_STATE |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_RTS2SQD,
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQD2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_SQD2SQD,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[UC]  = (IB_QP_AV | IB_QP_CUR_STATE |
					 IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_AV | IB_QP_TIMEOUT |
					 IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC |
					 IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQERR2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR }
	}
};
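
/*
 * Usage sketch (annotation, not part of the original file): mthca_modify_qp()
 * below indexes this table as state_table[cur_state][new_state], rejects the
 * transition when .trans is MTHCA_TRANS_INVALID, and then checks the caller's
 * attr_mask against req_param[qp->transport] (all required bits must be set)
 * and opt_param[qp->transport] (no other bits may be set besides IB_QP_STATE).
 */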
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = !!ah->static_rate;

	if (ah->ah_flags & IB_AH_GRH) {
		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
}
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 req_param, opt_param;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE)
			return -EINVAL;
		else
			cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	if (attr_mask & IB_QP_STATE) {
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
			return -EINVAL;
		new_state = attr->qp_state;
	} else
		new_state = cur_state;

	if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
		mthca_dbg(dev, "Illegal QP transition "
			  "%d->%d\n", cur_state, new_state);
		return -EINVAL;
	}

	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		mthca_dbg(dev, "QP transition "
			  "%d->%d missing req attr 0x%08x\n",
			  cur_state, new_state,
			  req_param & ~attr_mask);
		return -EINVAL;
	}

	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		mthca_dbg(dev, "QP transition (transport %d) "
			  "%d->%d has extra attr 0x%08x\n",
			  qp->transport,
			  cur_state, new_state,
			  attr_mask & ~(req_param | opt_param |
					IB_QP_STATE));
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	     attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len-1);
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		return -EINVAL;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			mthca_free_mailbox(dev, mailbox);
			return -EINVAL;
		}

		mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
			      qp->qpn, 0, mailbox, 0, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d returned status %02x.\n",
			   state_table[cur_state][new_state].trans, status);
		err = -EINVAL;
	}

	if (!err) {
		qp->state = new_state;
		if (attr_mask & IB_QP_ACCESS_FLAGS)
			qp->atomic_rd_en = attr->qp_access_flags;
		if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
			qp->resp_depth = attr->max_dest_rd_atomic;
	}

	mthca_free_mailbox(dev, mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_init(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_init(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->rq.db = 0;
			*qp->sq.db = 0;
		}
	}

	return err;
}
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}
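
/*
 * Annotation (not part of the original file): both wqe_shift loops above
 * start at 6, so every WQE is a power-of-two size of at least 64 bytes.
 * That lets get_recv_wqe()/get_send_wqe() turn an index into a byte offset
 * with a plain shift (n << wqe_shift), and send_wqe_offset is aligned to the
 * send WQE size so the send queue starts on a WQE boundary right after the
 * receive queue in the same buffer.
 */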
static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}
static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	int ret = 0;

	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return ret;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	init_waitqueue_head(&qp->wait);
	qp->state    	 = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr  	 > dev->limits.max_wqes ||
	    cap->max_recv_wr  	 > dev->limits.max_wqes ||
	    cap->max_send_sge 	 > dev->limits.max_sg   ||
	    cap->max_recv_sge 	 > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err = 0;

	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
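
/*
 * Annotation (not part of the original file): wq->head and wq->tail are
 * free-running counters, so the unsigned difference head - tail is the number
 * of posted-but-not-completed WQEs even across wraparound.  If the fast-path
 * check fails, the count is re-read under the CQ lock, apparently because the
 * completion path that advances wq->tail runs with that lock held.
 */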
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
			doorbell[1] = cpu_to_be32(qp->qpn << 8);

			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
			size0 = 0;
		}

		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
						  ((qp->sq.head & 0xffff) << 8) |
						  f0 | op0);
			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
			size0 = 0;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();
			mthca_write64(doorbell,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((nreq << 24) |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
		       int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all WQEs generate a CQE, so we're always at the
	 * end of the doorbell chain.
	 */
	if (qp->ibqp.srq) {
		*new_wqe = 0;
		return 0;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;

	return 0;
}
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}
);