/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */
#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
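/*
 * SRQ work queues of at most MTHCA_MAX_DIRECT_SRQ_SIZE bytes are kept
 * in a single physically contiguous ("direct") buffer; larger queues
 * are built from a list of pages.  get_wqe() below handles both
 * layouts.
 */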
struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be32 wqe_cnt;
	u32    reserved[2];
};
struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}
/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use an offset of 4
 * because in the Tavor case, posting a WQE may overwrite the first
 * four bytes of the previous WQE.  The offset avoids corrupting our
 * free list if the WQE has already completed and been put on the free
 * list when we post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + 4);
}
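/*
 * Free WQEs are thus kept on a singly linked list threaded through
 * the WQEs themselves: srq->first_free is the head index,
 * srq->last_free the tail, and a link value of -1 marks the end.
 * For example, a freshly allocated SRQ with max = 4 is linked
 * 0 -> 1 -> 2 -> 3 -> -1 by mthca_alloc_srq_buf() below.
 */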
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize;

	memset(context, 0, sizeof *context);

	logsize = long_log2(srq->max) + srq->wqe_shift;
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}
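/*
 * The two context initializers above reflect the two HCA families:
 * Tavor rings doorbells through a UAR page, while mem-free (Arbel)
 * HCAs keep SRQ state in ICM (hence mthca_table_get() below) and use
 * an in-memory doorbell record (srq->db, srq->db_index) instead.
 */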
static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}
	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	return 0;
}
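/*
 * After initialization each free WQE therefore looks like
 *
 *	struct mthca_next_seg   (bytes 4-7 hold the free-list link)
 *	struct mthca_data_seg[] (every lkey = MTHCA_INVAL_LKEY, 0x100)
 *
 * so the hardware sees only invalid scatter entries until the WQE is
 * actually posted.
 */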
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
		return -EINVAL;

	srq->max     = attr->max_wr;
	srq->max_gs  = attr->max_sge;
	srq->counter = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));
	srq->wqe_shift = long_log2(ds);
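	/*
	 * Worked example, assuming the usual 16-byte next/data segments
	 * from mthca_wqe.h: with max_gs = 4, ds = roundup_pow_of_two(16 +
	 * 4 * 16) = 128, so wqe_shift = 7 and each WQE occupies 128 bytes.
	 */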
	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}
	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	atomic_set(&srq->refcount, 1);
	init_waitqueue_head(&srq->wait);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);
	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;
	srq->last       = get_wqe(srq, srq->max - 1);

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
	err = -ENOMEM;
err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}
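/*
 * SW2HW_SRQ above hands ownership of the SRQ context to the HCA;
 * HW2SW_SRQ below reclaims it before the driver tears the SRQ down.
 */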
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	spin_unlock_irq(&dev->srq_table.lock);

	atomic_dec(&srq->refcount);
	wait_event(srq->wait, !atomic_read(&srq->refcount));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}
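/*
 * mthca_free_srq() drops its initial reference and then sleeps until
 * every mthca_srq_event() caller below has dropped theirs, so the SRQ
 * cannot be freed while an async event handler is still running.
 */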
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	if (atomic_dec_and_test(&srq->refcount))
		wake_up(&srq->wait);
}
/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}
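/*
 * On Tavor, each posted WQE is chained to its predecessor by
 * rewriting the previous WQE's nda_op/ee_nds words, and the whole
 * batch is then announced with a single write to the receive
 * doorbell in the UAR.
 */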
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);
		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
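		/*
		 * Doorbell layout: word 0 carries the byte offset of the
		 * first WQE in this batch, word 1 the SRQ number in the
		 * high 24 bits and the number of WQEs posted in the low
		 * byte.
		 */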
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
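/*
 * The mem-free version is simpler: WQEs are already linked via
 * nda_op at post time, and instead of a UAR doorbell the driver just
 * bumps the doorbell record with the new total WQE count.
 */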
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe      = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
int __devinit mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}
void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}