/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 QLogic Corporation. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>

#include <sys/stmf_defines.h>
#include <sys/fct_defines.h>
#include <sys/stmf.h>
#include <sys/portif.h>
#include <sys/fct.h>

#include "qlt.h"
#include "qlt_dma.h"

/*
 * Local Function Prototypes.
 */
static void
qlt_dma_free_handles(qlt_state_t *qlt, qlt_dma_handle_t *first_handle);

#define BUF_COUNT_2K        2048
#define BUF_COUNT_8K        512
#define BUF_COUNT_64K       256
#define BUF_COUNT_128K      1024
#define BUF_COUNT_256K      8

#define QLT_DMEM_MAX_BUF_SIZE   (4 * 65536)
#define QLT_DMEM_NBUCKETS       5
static qlt_dmem_bucket_t bucket2K   = { 2048, BUF_COUNT_2K },
            bucket8K    = { 8192, BUF_COUNT_8K },
            bucket64K   = { 65536, BUF_COUNT_64K },
            bucket128k  = { (2 * 65536), BUF_COUNT_128K },
            bucket256k  = { (4 * 65536), BUF_COUNT_256K };

static qlt_dmem_bucket_t *dmem_buckets[] = { &bucket2K, &bucket8K,
            &bucket64K, &bucket128k, &bucket256k, NULL };
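
/*
 * Sizing note: each bucket pairs a buffer size with a buffer count, and
 * qlt_dmem_init() below backs every bucket with one contiguous DMA
 * allocation that it carves into fixed-size buffers. With the default
 * counts, for example, the 2K bucket alone is 2048 * 2048 bytes = 4MB of
 * pre-allocated DMA-able memory.
 */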
static ddi_device_acc_attr_t acc;
static ddi_dma_attr_t qlt_scsi_dma_attr = {
    DMA_ATTR_V0,            /* dma_attr_version */
    0,                      /* low DMA address range */
    0xffffffffffffffff,     /* high DMA address range */
    0xffffffff,             /* DMA counter register */
    8192,                   /* DMA address alignment */
    0xff,                   /* DMA burstsizes */
    1,                      /* min effective DMA size */
    0xffffffff,             /* max DMA xfer size */
    0xffffffff,             /* segment boundary */
    1,                      /* s/g list length */
    1,                      /* granularity of device */
    0                       /* DMA transfer flags */
};
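
/*
 * Note that the s/g list length in qlt_scsi_dma_attr is 1: memory bound
 * against this attribute must map to a single DMA cookie, which is why
 * qlt_dmem_init() below treats ncookie != 1 from
 * ddi_dma_addr_bind_handle() as a failure.
 */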

fct_status_t
qlt_dmem_init(qlt_state_t *qlt)
{
    qlt_dmem_bucket_t   *p;
    qlt_dmem_bctl_t     *bctl, *bc;
    qlt_dmem_bctl_t     *prev;
    stmf_data_buf_t     *db;
    int                 ndx, i;
    uint32_t            total_mem;
    uint8_t             *addr;
    uint8_t             *host_addr;
    uint64_t            dev_addr;
    ddi_dma_cookie_t    cookie;
    uint32_t            ncookie;
    uint32_t            bsize;
    size_t              len;

    if (qlt->qlt_bucketcnt[0] != 0) {
        bucket2K.dmem_nbufs = qlt->qlt_bucketcnt[0];
    }
    if (qlt->qlt_bucketcnt[1] != 0) {
        bucket8K.dmem_nbufs = qlt->qlt_bucketcnt[1];
    }
    if (qlt->qlt_bucketcnt[2] != 0) {
        bucket64K.dmem_nbufs = qlt->qlt_bucketcnt[2];
    }
    if (qlt->qlt_bucketcnt[3] != 0) {
        bucket128k.dmem_nbufs = qlt->qlt_bucketcnt[3];
    }
    if (qlt->qlt_bucketcnt[4] != 0) {
        bucket256k.dmem_nbufs = qlt->qlt_bucketcnt[4];
    }

    bsize = sizeof (dmem_buckets);
    ndx = (int)(bsize / sizeof (void *));
    /*
     * The reason it is ndx - 1 everywhere is because the last bucket
     * pointer is NULL.
     */
    qlt->dmem_buckets = (qlt_dmem_bucket_t **)kmem_zalloc(bsize +
        ((ndx - 1) * (int)sizeof (qlt_dmem_bucket_t)), KM_SLEEP);
    for (i = 0; i < (ndx - 1); i++) {
        qlt->dmem_buckets[i] = (qlt_dmem_bucket_t *)
            ((uint8_t *)qlt->dmem_buckets + bsize +
            (i * (int)sizeof (qlt_dmem_bucket_t)));
        bcopy(dmem_buckets[i], qlt->dmem_buckets[i],
            sizeof (qlt_dmem_bucket_t));
    }

    bzero(&acc, sizeof (acc));
    acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
    acc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
    acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
    for (ndx = 0; (p = qlt->dmem_buckets[ndx]) != NULL; ndx++) {
        bctl = (qlt_dmem_bctl_t *)kmem_zalloc(p->dmem_nbufs *
            sizeof (qlt_dmem_bctl_t), KM_NOSLEEP);
        if (bctl == NULL) {
            EL(qlt, "bctl==NULL\n");
            goto alloc_bctl_failed;
        }
        p->dmem_bctls_mem = bctl;
        mutex_init(&p->dmem_lock, NULL, MUTEX_DRIVER, NULL);
        if ((i = ddi_dma_alloc_handle(qlt->dip, &qlt_scsi_dma_attr,
            DDI_DMA_SLEEP, 0, &p->dmem_dma_handle)) != DDI_SUCCESS) {
            EL(qlt, "ddi_dma_alloc_handle status=%xh\n", i);
            goto alloc_handle_failed;
        }

        total_mem = p->dmem_buf_size * p->dmem_nbufs;

        if ((i = ddi_dma_mem_alloc(p->dmem_dma_handle, total_mem, &acc,
            DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0, (caddr_t *)&addr,
            &len, &p->dmem_acc_handle)) != DDI_SUCCESS) {
            EL(qlt, "ddi_dma_mem_alloc status=%xh\n", i);
            goto mem_alloc_failed;
        }

        if ((i = ddi_dma_addr_bind_handle(p->dmem_dma_handle, NULL,
            (caddr_t)addr, total_mem, DDI_DMA_RDWR | DDI_DMA_STREAMING,
            DDI_DMA_DONTWAIT, 0, &cookie, &ncookie)) != DDI_SUCCESS) {
            EL(qlt, "ddi_dma_addr_bind_handle status=%xh\n", i);
            goto addr_bind_handle_failed;
        }
        if (ncookie != 1) {
            EL(qlt, "ncookie=%d\n", ncookie);
            goto dmem_init_failed;
        }

        p->dmem_host_addr = host_addr = addr;
        p->dmem_dev_addr = dev_addr = (uint64_t)cookie.dmac_laddress;
        bsize = p->dmem_buf_size;
        p->dmem_bctl_free_list = bctl;
        p->dmem_nbufs_free = p->dmem_nbufs;
        for (i = 0; i < p->dmem_nbufs; i++) {
            prev = bctl;
            bctl->bctl_bucket = p;
            bctl->bctl_buf = db = stmf_alloc(STMF_STRUCT_DATA_BUF,
                0, 0);
            db->db_port_private = bctl;
            db->db_sglist[0].seg_addr = host_addr;
            bctl->bctl_dev_addr = dev_addr;
            db->db_sglist[0].seg_length = db->db_buf_size = bsize;
            db->db_sglist_length = 1;
            host_addr += bsize;
            dev_addr += bsize;
            bctl++;
            prev->bctl_next = bctl;
        }
        prev->bctl_next = NULL;
    }

    return (QLT_SUCCESS);

dmem_failure_loop:;
    bc = bctl;
    while (bc) {
        stmf_free(bc->bctl_buf);
        bc = bc->bctl_next;
    }
dmem_init_failed:;
    (void) ddi_dma_unbind_handle(p->dmem_dma_handle);
addr_bind_handle_failed:;
    ddi_dma_mem_free(&p->dmem_acc_handle);
mem_alloc_failed:;
    ddi_dma_free_handle(&p->dmem_dma_handle);
alloc_handle_failed:;
    kmem_free(p->dmem_bctls_mem, p->dmem_nbufs * sizeof (qlt_dmem_bctl_t));
    mutex_destroy(&p->dmem_lock);
alloc_bctl_failed:;
    if (--ndx >= 0) {
        p = qlt->dmem_buckets[ndx];
        bctl = p->dmem_bctl_free_list;
        goto dmem_failure_loop;
    }
    kmem_free(qlt->dmem_buckets, sizeof (dmem_buckets) +
        (((sizeof (dmem_buckets) / sizeof (void *)) - 1) *
        sizeof (qlt_dmem_bucket_t)));
    qlt->dmem_buckets = NULL;

    return (QLT_FAILURE);
}

void
qlt_dma_handle_pool_init(qlt_state_t *qlt)
{
    qlt_dma_handle_pool_t   *pool;

    pool = kmem_zalloc(sizeof (*pool), KM_SLEEP);
    mutex_init(&pool->pool_lock, NULL, MUTEX_DRIVER, NULL);
    qlt->qlt_dma_handle_pool = pool;
}

void
qlt_dma_handle_pool_fini(qlt_state_t *qlt)
{
    qlt_dma_handle_pool_t   *pool;
    qlt_dma_handle_t        *handle, *next_handle;

    pool = qlt->qlt_dma_handle_pool;
    mutex_enter(&pool->pool_lock);
    /*
     * XXX Need to wait for free == total elements
     * XXX Not sure how other driver shutdown stuff is done.
     */
    ASSERT(pool->num_free == pool->num_total);
    if (pool->num_free != pool->num_total)
        cmn_err(CE_WARN,
            "num_free %d != num_total %d\n",
            pool->num_free, pool->num_total);
    handle = pool->free_list;
    while (handle) {
        next_handle = handle->next;
        kmem_free(handle, sizeof (*handle));
        handle = next_handle;
    }
    qlt->qlt_dma_handle_pool = NULL;
    mutex_exit(&pool->pool_lock);
    mutex_destroy(&pool->pool_lock);
    kmem_free(pool, sizeof (*pool));
}

void
qlt_dmem_fini(qlt_state_t *qlt)
{
    qlt_dmem_bucket_t   *p;
    qlt_dmem_bctl_t     *bctl;
    int                 ndx;

    for (ndx = 0; (p = qlt->dmem_buckets[ndx]) != NULL; ndx++) {
        bctl = p->dmem_bctl_free_list;
        while (bctl) {
            stmf_free(bctl->bctl_buf);
            bctl = bctl->bctl_next;
        }
        bctl = p->dmem_bctl_free_list;
        (void) ddi_dma_unbind_handle(p->dmem_dma_handle);
        ddi_dma_mem_free(&p->dmem_acc_handle);
        ddi_dma_free_handle(&p->dmem_dma_handle);
        kmem_free(p->dmem_bctls_mem,
            p->dmem_nbufs * sizeof (qlt_dmem_bctl_t));
        mutex_destroy(&p->dmem_lock);
    }
    kmem_free(qlt->dmem_buckets, sizeof (dmem_buckets) +
        (((sizeof (dmem_buckets) / sizeof (void *)) - 1) *
        sizeof (qlt_dmem_bucket_t)));
    qlt->dmem_buckets = NULL;
}

stmf_data_buf_t *
qlt_dmem_alloc(fct_local_port_t *port, uint32_t size, uint32_t *pminsize,
    uint32_t flags)
{
    return (qlt_i_dmem_alloc((qlt_state_t *)
        port->port_fca_private, size, pminsize,
        flags));
}
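
/*
 * Usage sketch (illustrative only): a caller that wants an 8K transfer
 * buffer but can live with less would do something like
 *
 *  uint32_t minsize = 2048;
 *  stmf_data_buf_t *db;
 *
 *  db = qlt_dmem_alloc(port, 8192, &minsize, 0);
 *
 * On success db->db_data_size holds the granted size; on failure minsize
 * is updated to the largest size currently available. The buffer is
 * returned via qlt_dmem_free().
 */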

/* ARGSUSED */
stmf_data_buf_t *
qlt_i_dmem_alloc(qlt_state_t *qlt, uint32_t size, uint32_t *pminsize,
    uint32_t flags)
{
    qlt_dmem_bucket_t   *p;
    qlt_dmem_bctl_t     *bctl;
    int                 i;
    uint32_t            size_possible = 0;

    if (size > QLT_DMEM_MAX_BUF_SIZE) {
        goto qlt_try_partial_alloc;
    }

    /* 1st try to do a full allocation */
    for (i = 0; (p = qlt->dmem_buckets[i]) != NULL; i++) {
        if (p->dmem_buf_size >= size) {
            if (p->dmem_nbufs_free) {
                mutex_enter(&p->dmem_lock);
                bctl = p->dmem_bctl_free_list;
                if (bctl == NULL) {
                    mutex_exit(&p->dmem_lock);
                    continue;
                }
                p->dmem_bctl_free_list =
                    bctl->bctl_next;
                p->dmem_nbufs_free--;
                qlt->qlt_bufref[i]++;
                mutex_exit(&p->dmem_lock);
                bctl->bctl_buf->db_data_size = size;
                return (bctl->bctl_buf);
            } else {
                qlt->qlt_bumpbucket++;
            }
        }
    }

qlt_try_partial_alloc:

    qlt->qlt_pmintry++;

    /* Now go from high to low */
    for (i = QLT_DMEM_NBUCKETS - 1; i >= 0; i--) {
        p = qlt->dmem_buckets[i];
        if (p->dmem_nbufs_free == 0)
            continue;
        if (!size_possible) {
            size_possible = p->dmem_buf_size;
        }
        if (*pminsize > p->dmem_buf_size) {
            /* At this point we know the request is failing. */
            if (size_possible) {
                /*
                 * This caller is asking too much. We already
                 * know what we can give, so get out.
                 */
                break;
            } else {
                /*
                 * Let's continue to find out and tell what
                 * we can give.
                 */
                continue;
            }
        }
        mutex_enter(&p->dmem_lock);
        if (*pminsize <= p->dmem_buf_size) {
            bctl = p->dmem_bctl_free_list;
            if (bctl == NULL) {
                /* Someone took it. */
                size_possible = 0;
                mutex_exit(&p->dmem_lock);
                continue;
            }
            p->dmem_bctl_free_list = bctl->bctl_next;
            p->dmem_nbufs_free--;
            mutex_exit(&p->dmem_lock);
            bctl->bctl_buf->db_data_size = p->dmem_buf_size;
            qlt->qlt_pmin_ok++;
            return (bctl->bctl_buf);
        }
        mutex_exit(&p->dmem_lock);
    }

    *pminsize = size_possible;

    return (NULL);
}

/* ARGSUSED */
void
qlt_i_dmem_free(qlt_state_t *qlt, stmf_data_buf_t *dbuf)
{
    qlt_dmem_free(0, dbuf);
}

/* ARGSUSED */
void
qlt_dmem_free(fct_dbuf_store_t *fds, stmf_data_buf_t *dbuf)
{
    qlt_dmem_bctl_t     *bctl;
    qlt_dmem_bucket_t   *p;

    ASSERT((dbuf->db_flags & DB_LU_DATA_BUF) == 0);

    bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
    p = bctl->bctl_bucket;
    mutex_enter(&p->dmem_lock);
    bctl->bctl_next = p->dmem_bctl_free_list;
    p->dmem_bctl_free_list = bctl;
    p->dmem_nbufs_free++;
    mutex_exit(&p->dmem_lock);
}

void
qlt_dmem_dma_sync(stmf_data_buf_t *dbuf, uint_t sync_type)
{
    qlt_dmem_bctl_t     *bctl;
    qlt_dma_sgl_t       *qsgl;
    qlt_dmem_bucket_t   *p;
    qlt_dma_handle_t    *th;
    int                 rv;

    if (dbuf->db_flags & DB_LU_DATA_BUF) {
        /*
         * go through ddi handle list
         */
        qsgl = (qlt_dma_sgl_t *)dbuf->db_port_private;
        th = qsgl->handle_list;
        while (th) {
            rv = ddi_dma_sync(th->dma_handle,
                0, 0, sync_type);
            if (rv != DDI_SUCCESS) {
                cmn_err(CE_WARN, "ddi_dma_sync FAILED\n");
            }
            th = th->next;
        }
    } else {
        bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
        p = bctl->bctl_bucket;
        (void) ddi_dma_sync(p->dmem_dma_handle, (off_t)
            (bctl->bctl_dev_addr - p->dmem_dev_addr),
            dbuf->db_data_size, sync_type);
    }
}
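
/*
 * Note: sync_type above is a standard DDI sync flag, passed straight
 * through to ddi_dma_sync(): DDI_DMA_SYNC_FORDEV before the HBA reads
 * the buffer, DDI_DMA_SYNC_FORCPU (or FORKERNEL) before the host reads
 * data the HBA wrote.
 */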

/*
 * A very lite version of ddi_dma_addr_bind_handle()
 */
uint64_t
qlt_ddi_vtop(caddr_t vaddr)
{
    uint64_t    offset, paddr;
    pfn_t       pfn;

    pfn = hat_getpfnum(kas.a_hat, vaddr);
    ASSERT(pfn != PFN_INVALID && pfn != PFN_SUSPENDED);
    offset = ((uintptr_t)vaddr) & MMU_PAGEOFFSET;
    paddr = mmu_ptob(pfn);
    return (paddr + offset);
}
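
/*
 * qlt_ddi_vtop() above relies on the identity that a physical address is
 * mmu_ptob(pfn) plus the intra-page offset; hat_getpfnum() supplies the
 * page frame number backing the kernel virtual address.
 */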

static ddi_dma_attr_t qlt_sgl_dma_attr = {
    DMA_ATTR_V0,            /* dma_attr_version */
    0,                      /* low DMA address range */
    0xffffffffffffffff,     /* high DMA address range */
    0xffffffff,             /* DMA counter register */
    64,                     /* DMA address alignment */
    0xff,                   /* DMA burstsizes */
    1,                      /* min effective DMA size */
    0xffffffff,             /* max DMA xfer size */
    0xffffffff,             /* segment boundary */
    QLT_DMA_SG_LIST_LENGTH, /* s/g list length */
    1,                      /* granularity of device */
    0                       /* DMA transfer flags */
};

/*
 * Allocate a qlt_dma_handle container and fill it with a ddi_dma_handle
 */
static qlt_dma_handle_t *
qlt_dma_alloc_handle(qlt_state_t *qlt)
{
    ddi_dma_handle_t    ddi_handle;
    qlt_dma_handle_t    *qlt_handle;
    int                 rv;

    rv = ddi_dma_alloc_handle(qlt->dip, &qlt_sgl_dma_attr,
        DDI_DMA_SLEEP, 0, &ddi_handle);
    if (rv != DDI_SUCCESS) {
        EL(qlt, "ddi_dma_alloc_handle status=%xh\n", rv);
        return (NULL);
    }
    qlt_handle = kmem_zalloc(sizeof (qlt_dma_handle_t), KM_SLEEP);
    qlt_handle->dma_handle = ddi_handle;
    return (qlt_handle);
}

/*
 * Allocate a list of qlt_dma_handle containers from the free list
 */
static qlt_dma_handle_t *
qlt_dma_alloc_handle_list(qlt_state_t *qlt, int handle_count)
{
    qlt_dma_handle_pool_t   *pool;
    qlt_dma_handle_t        *tmp_handle, *first_handle, *last_handle;
    int                     i;

    /*
     * Make sure the free list can satisfy the request.
     * Once the free list is primed, it should satisfy most requests.
     * XXX Should there be a limit on pool size?
     */
    pool = qlt->qlt_dma_handle_pool;
    mutex_enter(&pool->pool_lock);
    while (handle_count > pool->num_free) {
        mutex_exit(&pool->pool_lock);
        if ((tmp_handle = qlt_dma_alloc_handle(qlt)) == NULL)
            return (NULL);
        mutex_enter(&pool->pool_lock);
        tmp_handle->next = pool->free_list;
        pool->free_list = tmp_handle;
        pool->num_free++;
        pool->num_total++;
    }

    /*
     * The free list lock is held and the list is large enough to
     * satisfy this request. Run down the free list and snip off
     * the number of elements needed for this request.
     */
    first_handle = pool->free_list;
    tmp_handle = first_handle;
    for (i = 0; i < handle_count; i++) {
        last_handle = tmp_handle;
        tmp_handle = tmp_handle->next;
    }
    pool->free_list = tmp_handle;
    pool->num_free -= handle_count;
    mutex_exit(&pool->pool_lock);
    last_handle->next = NULL;   /* sanity */
    return (first_handle);
}

/*
 * Return a list of qlt_dma_handle containers to the free list.
 */
static void
qlt_dma_free_handles(qlt_state_t *qlt, qlt_dma_handle_t *first_handle)
{
    qlt_dma_handle_pool_t   *pool;
    qlt_dma_handle_t        *tmp_handle, *last_handle;
    int                     rv, handle_count;

    /*
     * Traverse the list and unbind the handles
     */
    ASSERT(first_handle);
    tmp_handle = first_handle;
    handle_count = 0;
    while (tmp_handle != NULL) {
        last_handle = tmp_handle;
        /*
         * If the handle is bound, unbind the handle so it can be
         * reused. It may not be bound if there was a bind failure.
         */
        if (tmp_handle->num_cookies != 0) {
            rv = ddi_dma_unbind_handle(tmp_handle->dma_handle);
            ASSERT(rv == DDI_SUCCESS);
            tmp_handle->num_cookies = 0;
            tmp_handle->num_cookies_fetched = 0;
        }
        tmp_handle = tmp_handle->next;
        handle_count++;
    }
    /*
     * Insert this list into the free list
     */
    pool = qlt->qlt_dma_handle_pool;
    mutex_enter(&pool->pool_lock);
    last_handle->next = pool->free_list;
    pool->free_list = first_handle;
    pool->num_free += handle_count;
    mutex_exit(&pool->pool_lock);
}

/*
 * Return the number of cookies produced by mapping this dbuf.
 */
uint16_t
qlt_get_cookie_count(stmf_data_buf_t *dbuf)
{
    qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

    ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
    return (qsgl->cookie_count);
}

ddi_dma_cookie_t *
qlt_get_cookie_array(stmf_data_buf_t *dbuf)
{
    qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

    ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

    if (qsgl->cookie_prefetched)
        return (&qsgl->cookie[0]);
    else
        return (NULL);
}

/*
 * Wrapper around ddi_dma_nextcookie() that hides the ddi_dma_handle usage.
 */
void
qlt_ddi_dma_nextcookie(stmf_data_buf_t *dbuf, ddi_dma_cookie_t *cookiep)
{
    qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

    ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

    if (qsgl->cookie_prefetched) {
        ASSERT(qsgl->cookie_next_fetch < qsgl->cookie_count);
        *cookiep = qsgl->cookie[qsgl->cookie_next_fetch++];
    } else {
        qlt_dma_handle_t *fetch;
        qlt_dma_handle_t *FETCH_DONE = (qlt_dma_handle_t *)0xbad;

        ASSERT(qsgl->handle_list != NULL);
        ASSERT(qsgl->handle_next_fetch != FETCH_DONE);

        fetch = qsgl->handle_next_fetch;
        if (fetch->num_cookies_fetched == 0) {
            *cookiep = fetch->first_cookie;
        } else {
            ddi_dma_nextcookie(fetch->dma_handle, cookiep);
        }
        if (++fetch->num_cookies_fetched == fetch->num_cookies) {
            if (fetch->next == NULL)
                qsgl->handle_next_fetch = FETCH_DONE;
            else
                qsgl->handle_next_fetch = fetch->next;
        }
    }
}
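
/*
 * Usage sketch (illustrative only): a consumer can walk every cookie of
 * a mapped dbuf without ever touching the underlying DDI handles:
 *
 *  ddi_dma_cookie_t c;
 *  uint16_t i, n = qlt_get_cookie_count(dbuf);
 *
 *  for (i = 0; i < n; i++) {
 *      qlt_ddi_dma_nextcookie(dbuf, &c);
 *      (use c.dmac_laddress and c.dmac_size, e.g. to fill an IOCB)
 *  }
 */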

/*
 * Set this flag to fetch the DDI dma cookies from the handles here and
 * store them in the port private area of the dbuf. This will allow
 * faster access to the cookies in qlt_xfer_scsi_data() at the expense of
 * an extra copy. If the qlt->req_lock is hot, this may help.
 */
int qlt_sgl_prefetch = 0;
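
/*
 * Like other global driver tunables, this can be set at boot time via
 * /etc/system, e.g.:
 *
 *  set qlt:qlt_sgl_prefetch = 1
 */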

/* ARGSUSED */
stmf_status_t
qlt_dma_setup_dbuf(fct_local_port_t *port, stmf_data_buf_t *dbuf,
    uint32_t flags)
{
    qlt_state_t             *qlt = port->port_fca_private;
    qlt_dma_sgl_t           *qsgl;
    struct stmf_sglist_ent  *sglp;
    qlt_dma_handle_t        *handle_list, *th;
    int                     i, rv;
    ddi_dma_cookie_t        *cookie_p;
    int                     cookie_count, numbufs;
    int                     prefetch;
    size_t                  qsize;

    /*
     * Pseudo code:
     * get dma handle list from cache - one per sglist entry
     * foreach sglist entry
     *  bind dma handle to sglist vaddr
     * allocate space for DMA state to store in db_port_private
     * fill in port private object
     * if prefetching
     *  move all dma cookies into db_port_private
     */
    dbuf->db_port_private = NULL;
    numbufs = dbuf->db_sglist_length;
    handle_list = qlt_dma_alloc_handle_list(qlt, numbufs);
    if (handle_list == NULL) {
        EL(qlt, "handle_list==NULL\n");
        return (STMF_FAILURE);
    }
    /*
     * Loop through sglist and bind each entry to a handle
     */
    th = handle_list;
    sglp = &dbuf->db_sglist[0];
    cookie_count = 0;
    for (i = 0; i < numbufs; i++, sglp++) {

        /*
         * Bind this sgl entry to a DDI dma handle
         */
        if ((rv = ddi_dma_addr_bind_handle(
            th->dma_handle,
            NULL,
            (caddr_t)(sglp->seg_addr),
            (size_t)sglp->seg_length,
            DDI_DMA_RDWR | DDI_DMA_STREAMING,
            DDI_DMA_DONTWAIT,
            NULL,
            &th->first_cookie,
            &th->num_cookies)) != DDI_DMA_MAPPED) {
            cmn_err(CE_NOTE, "ddi_dma_addr_bind_handle %d", rv);
            qlt_dma_free_handles(qlt, handle_list);
            return (STMF_FAILURE);
        }

        /*
         * Add to total cookie count
         */
        cookie_count += th->num_cookies;
        if (cookie_count > QLT_DMA_SG_LIST_LENGTH) {
            /*
             * Request exceeds HBA limit
             */
            qlt_dma_free_handles(qlt, handle_list);
            return (STMF_FAILURE);
        }
        /* move to next ddi_dma_handle */
        th = th->next;
    }

    /*
     * Allocate our port private object for DMA mapping state.
     */
    prefetch = qlt_sgl_prefetch;
    qsize = sizeof (qlt_dma_sgl_t);
    if (prefetch) {
        /* one extra ddi_dma_cookie allocated for alignment padding */
        qsize += cookie_count * sizeof (ddi_dma_cookie_t);
    }
    qsgl = kmem_alloc(qsize, KM_SLEEP);
    /*
     * Fill in the sgl
     */
    dbuf->db_port_private = qsgl;
    qsgl->qsize = qsize;
    qsgl->handle_count = dbuf->db_sglist_length;
    qsgl->cookie_prefetched = prefetch;
    qsgl->cookie_count = cookie_count;
    qsgl->cookie_next_fetch = 0;
    qsgl->handle_list = handle_list;
    qsgl->handle_next_fetch = handle_list;
    if (prefetch) {
        /*
         * traverse handle list and move cookies to db_port_private
         */
        th = handle_list;
        cookie_p = &qsgl->cookie[0];
        for (i = 0; i < numbufs; i++) {
            uint_t cc = th->num_cookies;

            *cookie_p++ = th->first_cookie;
            while (--cc > 0) {
                ddi_dma_nextcookie(th->dma_handle, cookie_p++);
            }
            th->num_cookies_fetched = th->num_cookies;
            th = th->next;
        }
    }

    return (STMF_SUCCESS);
}

void
qlt_dma_teardown_dbuf(fct_dbuf_store_t *fds, stmf_data_buf_t *dbuf)
{
    qlt_state_t     *qlt = fds->fds_fca_private;
    qlt_dma_sgl_t   *qsgl = dbuf->db_port_private;

    ASSERT(qlt);
    ASSERT(qsgl);
    ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

    /*
     * unbind and free the dma handles
     */
    if (qsgl->handle_list) {
        /* go through ddi handle list */
        qlt_dma_free_handles(qlt, qsgl->handle_list);
    }
    kmem_free(qsgl, qsgl->qsize);
}

uint8_t
qlt_get_iocb_count(uint32_t cookie_count)
{
    uint32_t    cnt, cont_segs;
    uint8_t     iocb_count;

    iocb_count = 1;
    cnt = CMD7_2400_DATA_SEGMENTS;
    cont_segs = CONT_A64_DATA_SEGMENTS;

    if (cookie_count > cnt) {
        cnt = cookie_count - cnt;   /* segments beyond the command IOCB */
        iocb_count = (uint8_t)(iocb_count + cnt / cont_segs);
        if (cnt % cont_segs) {
            iocb_count++;
        }
    }
    return (iocb_count);
}