4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2004-2011 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26 * Copyright 2020 RackTop Systems, Inc.
31 /* #define EMLXS_POOL_DEBUG */
33 EMLXS_MSG_DEF(EMLXS_MEM_C
);
36 static uint32_t emlxs_mem_pool_alloc(emlxs_hba_t
*hba
, MEMSEG
*seg
,
38 static void emlxs_mem_pool_free(emlxs_hba_t
*hba
, MEMSEG
*seg
, uint32_t count
);
/*
 * emlxs_mem_alloc_buffer
 *
 * Allocates the per-HBA fc_table (an array of emlxs_buf_t pointers sized
 * by hba->max_iotag) and configures every MEMSEG slot in hba->memseg[]
 * (node, IOCB, mailbox, BPL, unsolicited ELS/IP/CT/FCT buffer and SGL
 * pools), then creates each pool via emlxs_mem_pool_create().  For SLI3
 * with a static MEM_BPL pool it also pre-populates a bpl_table of
 * MATCHMAP pointers for faster lookup.  On allocation failure it unwinds
 * through emlxs_mem_free_buffer().
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (gaps in the embedded numbering — message strings, gotos, returns,
 * closing braces).  Comments here describe only what is visible; confirm
 * against the complete emlxs_mem.c before relying on them.
 */
42 emlxs_mem_alloc_buffer(emlxs_hba_t
*hba
)
44 emlxs_port_t
*port
= &PPORT
;
/* Start from a zeroed memseg array before configuring pools */
56 bzero(hba
->memseg
, sizeof (hba
->memseg
));
58 /* Allocate the fc_table */
59 bzero(buf_info
, sizeof (MBUF_INFO
));
60 buf_info
->size
= (hba
->max_iotag
* sizeof (emlxs_buf_t
*));
62 (void) emlxs_mem_alloc(hba
, buf_info
);
63 if (buf_info
->virt
== NULL
) {
65 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mem_alloc_failed_msg
,
70 hba
->fc_table
= buf_info
->virt
;
71 bzero(hba
->fc_table
, buf_info
->size
);
73 /* Prepare the memory pools */
74 for (i
= 0; i
< FC_MAX_SEG
; i
++) {
75 seg
= &hba
->memseg
[i
];
/* Node pool: one NODELIST per remote node, high water = max_nodes + 2 */
79 (void) strlcpy(seg
->fc_label
, "Node Pool",
80 sizeof (seg
->fc_label
));
81 seg
->fc_memtag
= MEM_NLP
;
82 seg
->fc_memsize
= sizeof (NODELIST
);
83 seg
->fc_hi_water
= hba
->max_nodes
+ 2;
/* IOCB pool: sized from the CFG_NUM_IOCBS configuration entry */
89 (void) strlcpy(seg
->fc_label
, "IOCB Pool",
90 sizeof (seg
->fc_label
));
91 seg
->fc_memtag
= MEM_IOCB
;
92 seg
->fc_memsize
= sizeof (IOCBQ
);
93 seg
->fc_hi_water
= cfg
[CFG_NUM_IOCBS
].current
;
94 seg
->fc_lo_water
= cfg
[CFG_NUM_IOCBS
].low
;
95 seg
->fc_step
= cfg
[CFG_NUM_IOCBS
].low
;
/* Mailbox pool */
99 (void) strlcpy(seg
->fc_label
, "MBOX Pool",
100 sizeof (seg
->fc_label
));
101 seg
->fc_memtag
= MEM_MBOX
;
102 seg
->fc_memsize
= sizeof (MAILBOXQ
);
103 seg
->fc_hi_water
= hba
->max_nodes
+ 32;
104 seg
->fc_lo_water
= 32;
/*
 * BPL pool (DMA): visible condition keys off EMLXS_SLI4_MASK;
 * NOTE(review): the branch structure around this test is partially
 * missing from this view — confirm which SLI mode takes this path.
 */
109 if (hba
->model_info
.sli_mask
& EMLXS_SLI4_MASK
) {
112 (void) strlcpy(seg
->fc_label
, "BPL Pool",
113 sizeof (seg
->fc_label
));
114 seg
->fc_memtag
= MEM_BPL
;
115 seg
->fc_memsize
= hba
->sli
.sli3
.mem_bpl_size
;
116 seg
->fc_memflag
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
;
117 seg
->fc_memalign
= 32;
118 seg
->fc_hi_water
= hba
->max_iotag
;
119 seg
->fc_lo_water
= cfg
[CFG_NUM_IOCBS
].low
;
120 seg
->fc_step
= cfg
[CFG_NUM_IOCBS
].low
;
124 /* These are the unsolicited ELS buffers. */
125 (void) strlcpy(seg
->fc_label
, "BUF Pool",
126 sizeof (seg
->fc_label
));
127 seg
->fc_memtag
= MEM_BUF
;
128 seg
->fc_memsize
= MEM_BUF_SIZE
;
129 seg
->fc_memflag
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
;
130 seg
->fc_memalign
= 32;
131 seg
->fc_hi_water
= MEM_ELSBUF_COUNT
+ MEM_BUF_COUNT
;
132 seg
->fc_lo_water
= MEM_ELSBUF_COUNT
;
137 /* These are the unsolicited IP buffers. */
/* IP buffer pool is only configured when networking is enabled;
 * NOTE(review): the skip path for current == 0 is not visible here. */
138 if (cfg
[CFG_NETWORK_ON
].current
== 0) {
142 (void) strlcpy(seg
->fc_label
, "IPBUF Pool",
143 sizeof (seg
->fc_label
));
144 seg
->fc_memtag
= MEM_IPBUF
;
145 seg
->fc_memsize
= MEM_IPBUF_SIZE
;
146 seg
->fc_memflag
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
;
147 seg
->fc_memalign
= 32;
148 seg
->fc_hi_water
= MEM_IPBUF_COUNT
;
149 seg
->fc_lo_water
= 0;
154 /* These are the unsolicited CT buffers. */
155 (void) strlcpy(seg
->fc_label
, "CTBUF Pool",
156 sizeof (seg
->fc_label
));
157 seg
->fc_memtag
= MEM_CTBUF
;
158 seg
->fc_memsize
= MEM_CTBUF_SIZE
;
159 seg
->fc_memflag
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
;
160 seg
->fc_memalign
= 32;
161 seg
->fc_hi_water
= MEM_CTBUF_COUNT
;
162 seg
->fc_lo_water
= MEM_CTBUF_COUNT
;
/* 1K/2K/4K SGL pools: dynamic DMA pools (lo=0, step=0x100, hi=0x5000) */
167 (void) strlcpy(seg
->fc_label
, "1K SGL Pool",
168 sizeof (seg
->fc_label
));
169 seg
->fc_memtag
= MEM_SGL1K
;
170 seg
->fc_memsize
= 0x400;
171 seg
->fc_memflag
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
;
172 seg
->fc_memalign
= 32;
173 seg
->fc_hi_water
= 0x5000;
174 seg
->fc_lo_water
= 0;
175 seg
->fc_step
= 0x100;
179 (void) strlcpy(seg
->fc_label
, "2K SGL Pool",
180 sizeof (seg
->fc_label
));
181 seg
->fc_memtag
= MEM_SGL2K
;
182 seg
->fc_memsize
= 0x800;
183 seg
->fc_memflag
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
;
184 seg
->fc_memalign
= 32;
185 seg
->fc_hi_water
= 0x5000;
186 seg
->fc_lo_water
= 0;
187 seg
->fc_step
= 0x100;
191 (void) strlcpy(seg
->fc_label
, "4K SGL Pool",
192 sizeof (seg
->fc_label
));
193 seg
->fc_memtag
= MEM_SGL4K
;
194 seg
->fc_memsize
= 0x1000;
195 seg
->fc_memflag
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
;
196 seg
->fc_memalign
= 32;
197 seg
->fc_hi_water
= 0x5000;
198 seg
->fc_lo_water
= 0;
199 seg
->fc_step
= 0x100;
204 /* These are the unsolicited FCT buffers. */
/* FCT buffer pool only when target mode is enabled (SFCT_SUPPORT) */
205 if (!(port
->flag
& EMLXS_TGT_ENABLED
)) {
209 (void) strlcpy(seg
->fc_label
, "FCTBUF Pool",
210 sizeof (seg
->fc_label
));
211 seg
->fc_memtag
= MEM_FCTBUF
;
212 seg
->fc_memsize
= MEM_FCTBUF_SIZE
;
213 seg
->fc_memflag
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
;
214 seg
->fc_memalign
= 32;
215 seg
->fc_hi_water
= MEM_FCTBUF_COUNT
;
216 seg
->fc_lo_water
= 0;
219 #endif /* SFCT_SUPPORT */
/* Skip unconfigured slots (fc_memsize == 0), then create each pool */
225 if (seg
->fc_memsize
== 0) {
229 (void) emlxs_mem_pool_create(hba
, seg
);
/* Pool failed to reach its low-water mark: log and fail the setup */
231 if (seg
->fc_numblks
< seg
->fc_lo_water
) {
232 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mem_alloc_failed_msg
,
233 "%s: count=%d size=%d flags=%x lo=%d hi=%d",
234 seg
->fc_label
, seg
->fc_numblks
,
235 seg
->fc_memsize
, seg
->fc_memflag
, seg
->fc_lo_water
,
242 hba
->sli
.sli3
.bpl_table
= NULL
;
243 seg
= &hba
->memseg
[MEM_BPL
];
245 /* If SLI3 and MEM_BPL pool is static */
246 if (!(hba
->model_info
.sli_mask
& EMLXS_SLI4_MASK
) &&
247 !(seg
->fc_memflag
& FC_MEMSEG_DYNAMIC
)) {
249 * Allocate and Initialize bpl_table
250 * This is for increased performance.
252 bzero(buf_info
, sizeof (MBUF_INFO
));
253 buf_info
->size
= hba
->max_iotag
* sizeof (MATCHMAP
*);
255 (void) emlxs_mem_alloc(hba
, buf_info
);
256 if (buf_info
->virt
== NULL
) {
258 EMLXS_MSGF(EMLXS_CONTEXT
,
259 &emlxs_mem_alloc_failed_msg
,
260 "BPL table buffer.");
264 hba
->sli
.sli3
.bpl_table
= buf_info
->virt
;
/* Pre-fetch one MEM_BPL object per iotag and mark it table-owned */
266 bpl_table
= (MATCHMAP
**)hba
->sli
.sli3
.bpl_table
;
267 for (i
= 0; i
< hba
->max_iotag
; i
++) {
268 mp
= (MATCHMAP
*) emlxs_mem_get(hba
, MEM_BPL
);
269 mp
->flag
|= MAP_TABLE_ALLOCATED
;
/* Failure path: tear everything back down */
278 (void) emlxs_mem_free_buffer(hba
);
281 } /* emlxs_mem_alloc_buffer() */
285 * emlxs_mem_free_buffer
287 * This routine will free iocb/data buffer space
/*
 * emlxs_mem_free_buffer
 *
 * Tears down everything emlxs_mem_alloc_buffer() set up: flushes the TX
 * channels, returns posted unsolicited buffers (ELS/CT/IP/FCT) from the
 * SLI3 ring match lists back to their pools, frees all HBQ buffers,
 * destroys per-vport node lists, frees the fc_table and the SLI3
 * bpl_table, and finally destroys every memseg pool.
 *
 * NOTE(review): this extracted view is missing interior source lines;
 * comments describe only the visible code.
 */
291 emlxs_mem_free_buffer(emlxs_hba_t
*hba
)
293 emlxs_port_t
*port
= &PPORT
;
301 MATCHMAP
**bpl_table
;
/* Flush all pending transmit queues on every channel */
305 for (j
= 0; j
< hba
->chan_count
; j
++) {
309 (void) emlxs_tx_channel_flush(hba
, cp
, 0);
312 if (!(hba
->model_info
.sli_mask
& EMLXS_SLI4_MASK
)) {
313 /* free the mapped address match area for each ring */
314 for (j
= 0; j
< MAX_RINGS
; j
++) {
315 rp
= &hba
->sli
.sli3
.ring
[j
];
317 while (rp
->fc_mpoff
) {
321 mp
= (MATCHMAP
*)(rp
->fc_mpoff
);
/* Only rings that carry unsolicited buffers are processed */
323 if ((j
== hba
->channel_els
) ||
324 (j
== hba
->channel_ct
) ||
326 (j
== hba
->CHANNEL_FCT
) ||
327 #endif /* SFCT_SUPPORT */
328 (j
== hba
->channel_ip
)) {
/* Unlink the buffer by physical address, then return it
 * to the pool matching its ring type */
332 if ((mp
= emlxs_mem_get_vaddr(hba
, rp
, addr
))) {
333 if (j
== hba
->channel_els
) {
335 MEM_ELSBUF
, (void *)mp
);
336 } else if (j
== hba
->channel_ct
) {
338 MEM_CTBUF
, (void *)mp
);
339 } else if (j
== hba
->channel_ip
) {
341 MEM_IPBUF
, (void *)mp
);
344 else if (j
== hba
->CHANNEL_FCT
) {
346 MEM_FCTBUF
, (void *)mp
);
348 #endif /* SFCT_SUPPORT */
/* Release all host buffer queue entries, per HBQ id */
355 if (hba
->flag
& FC_HBQ_ENABLED
) {
356 emlxs_hbq_free_all(hba
, EMLXS_ELS_HBQ_ID
);
357 emlxs_hbq_free_all(hba
, EMLXS_IP_HBQ_ID
);
358 emlxs_hbq_free_all(hba
, EMLXS_CT_HBQ_ID
);
360 if (port
->flag
& EMLXS_TGT_ENABLED
) {
361 emlxs_hbq_free_all(hba
, EMLXS_FCT_HBQ_ID
);
/* Destroy node lists on every virtual port */
366 for (j
= 0; j
< MAX_VPORTS
; j
++) {
368 if (vport
->node_count
) {
369 emlxs_node_destroy_all(vport
);
373 /* Make sure the mailbox queue is empty */
/* Free the fc_table allocated in emlxs_mem_alloc_buffer() */
377 bzero(buf_info
, sizeof (MBUF_INFO
));
378 buf_info
->size
= hba
->max_iotag
* sizeof (emlxs_buf_t
*);
379 buf_info
->virt
= hba
->fc_table
;
380 emlxs_mem_free(hba
, buf_info
);
381 hba
->fc_table
= NULL
;
384 if (hba
->sli
.sli3
.bpl_table
) {
385 /* Return MEM_BPLs to their pool */
386 bpl_table
= (MATCHMAP
**)hba
->sli
.sli3
.bpl_table
;
387 for (j
= 0; j
< hba
->max_iotag
; j
++) {
389 mp
->flag
&= ~MAP_TABLE_ALLOCATED
;
390 emlxs_mem_put(hba
, MEM_BPL
, (void*)mp
);
/* Free the bpl_table pointer array itself */
393 bzero(buf_info
, sizeof (MBUF_INFO
));
394 buf_info
->size
= hba
->max_iotag
* sizeof (MATCHMAP
*);
395 buf_info
->virt
= hba
->sli
.sli3
.bpl_table
;
396 emlxs_mem_free(hba
, buf_info
);
397 hba
->sli
.sli3
.bpl_table
= NULL
;
400 /* Free the memory segments */
401 for (j
= 0; j
< FC_MAX_SEG
; j
++) {
402 emlxs_mem_pool_destroy(hba
, &hba
->memseg
[j
]);
407 } /* emlxs_mem_free_buffer() */
410 /* Must hold EMLXS_MEMGET_LOCK when calling */
/*
 * emlxs_mem_pool_alloc
 *
 * Grows a memory pool segment by up to 'count' objects, clamped so the
 * pool never exceeds fc_hi_water.  For DMA pools (FC_MBUF_DMA) each
 * object is a MATCHMAP descriptor plus a separately allocated DMA
 * buffer; for plain pools each object is a raw fc_memsize buffer.  New
 * objects are linked onto the tail of the memget freelist.  Returns the
 * number of objects actually added (fc_numblks delta).
 *
 * Caller must hold EMLXS_MEMGET_LOCK (per the comment above this
 * function in the original source).
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (early returns, loop closers, fc_numblks updates); comments describe
 * only the visible code.
 */
412 emlxs_mem_pool_alloc(emlxs_hba_t
*hba
, MEMSEG
*seg
, uint32_t count
)
414 emlxs_port_t
*port
= &PPORT
;
418 MBUF_INFO local_buf_info
;
/* Nothing to do for an unconfigured or already-full pool */
422 if (seg
->fc_memsize
== 0) {
426 if (seg
->fc_numblks
>= seg
->fc_hi_water
) {
/* Clamp the request so the pool cannot exceed its high-water mark */
434 if (count
> (seg
->fc_hi_water
- seg
->fc_numblks
)) {
435 count
= (seg
->fc_hi_water
- seg
->fc_numblks
);
438 buf_info
= &local_buf_info
;
/* Remember starting block count so the delta can be returned */
439 fc_numblks
= seg
->fc_numblks
;
441 /* Check for initial allocation */
442 if (!(seg
->fc_memflag
& FC_MEMSEG_PUT_ENABLED
)) {
443 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_detail_msg
,
444 "%s alloc:%d n=%d s=%d f=%x l=%d,%d,%d f=%d:%d",
445 seg
->fc_label
, count
, seg
->fc_numblks
,
446 seg
->fc_memsize
, seg
->fc_memflag
,
447 seg
->fc_lo_water
, seg
->fc_hi_water
, seg
->fc_step
,
448 seg
->fc_memget_cnt
, seg
->fc_low
);
/* Non-DMA pools are handled by the simpler loop further below */
451 if (!(seg
->fc_memflag
& FC_MBUF_DMA
)) {
/* DMA pool path: allocate a MATCHMAP descriptor + DMA buffer per object */
457 for (i
= 0; i
< count
; i
++) {
458 bzero(buf_info
, sizeof (MBUF_INFO
));
459 buf_info
->size
= sizeof (MATCHMAP
);
460 buf_info
->align
= sizeof (void *);
462 (void) emlxs_mem_alloc(hba
, buf_info
);
463 if (buf_info
->virt
== NULL
) {
464 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mem_alloc_failed_msg
,
465 "%s: count=%d size=%d",
466 seg
->fc_label
, seg
->fc_numblks
, seg
->fc_memsize
);
471 mp
= (MATCHMAP
*)buf_info
->virt
;
472 bzero(mp
, sizeof (MATCHMAP
));
/* Now allocate the DMA buffer this descriptor will track */
474 bzero(buf_info
, sizeof (MBUF_INFO
));
475 buf_info
->size
= seg
->fc_memsize
;
476 buf_info
->flags
= seg
->fc_memflag
;
477 buf_info
->align
= seg
->fc_memalign
;
479 (void) emlxs_mem_alloc(hba
, buf_info
);
480 if (buf_info
->virt
== NULL
) {
481 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mem_alloc_failed_msg
,
482 "%s: count=%d size=%d",
483 seg
->fc_label
, seg
->fc_numblks
, seg
->fc_memsize
);
485 /* Free the mp object */
486 bzero(buf_info
, sizeof (MBUF_INFO
));
487 buf_info
->size
= sizeof (MATCHMAP
);
488 buf_info
->virt
= (void *)mp
;
489 emlxs_mem_free(hba
, buf_info
);
493 bp
= (uint8_t *)buf_info
->virt
;
494 bzero(bp
, seg
->fc_memsize
);
/* Record the DMA buffer's addresses and handles in the descriptor */
496 mp
->virt
= buf_info
->virt
;
497 mp
->phys
= buf_info
->phys
;
498 mp
->size
= buf_info
->size
;
499 mp
->dma_handle
= buf_info
->dma_handle
;
500 mp
->data_handle
= buf_info
->data_handle
;
501 mp
->tag
= seg
->fc_memtag
;
503 mp
->flag
|= MAP_POOL_ALLOCATED
;
/* FCT segments also need a COMSTAR/stmf buffer association */
506 if (mp
->tag
>= MEM_FCTSEG
) {
507 if (emlxs_fct_stmf_alloc(hba
, mp
)) {
508 /* Free the DMA memory itself */
509 emlxs_mem_free(hba
, buf_info
);
511 /* Free the mp object */
512 bzero(buf_info
, sizeof (MBUF_INFO
));
513 buf_info
->size
= sizeof (MATCHMAP
);
514 buf_info
->virt
= (void *)mp
;
515 emlxs_mem_free(hba
, buf_info
);
520 #endif /* SFCT_SUPPORT */
522 /* Add the buffer desc to the tail of the pool freelist */
523 if (seg
->fc_memget_end
== NULL
) {
524 seg
->fc_memget_ptr
= (uint8_t *)mp
;
525 seg
->fc_memget_cnt
= 1;
527 *((uint8_t **)(seg
->fc_memget_end
)) = (uint8_t *)mp
;
528 seg
->fc_memget_cnt
++;
530 seg
->fc_memget_end
= (uint8_t *)mp
;
/* Account descriptor + payload in the pool's total footprint */
533 seg
->fc_total_memsize
+= (seg
->fc_memsize
+ sizeof (MATCHMAP
));
/* Non-DMA pool path: each object is just an fc_memsize buffer */
540 for (i
= 0; i
< count
; i
++) {
541 bzero(buf_info
, sizeof (MBUF_INFO
));
542 buf_info
->size
= seg
->fc_memsize
;
544 (void) emlxs_mem_alloc(hba
, buf_info
);
545 if (buf_info
->virt
== NULL
) {
546 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mem_alloc_failed_msg
,
547 "%s: count=%d size=%d",
548 seg
->fc_label
, seg
->fc_numblks
, seg
->fc_memsize
);
552 bp
= (uint8_t *)buf_info
->virt
;
554 /* Add the buffer to the tail of the pool freelist */
555 if (seg
->fc_memget_end
== NULL
) {
556 seg
->fc_memget_ptr
= (uint8_t *)bp
;
557 seg
->fc_memget_cnt
= 1;
559 *((uint8_t **)(seg
->fc_memget_end
)) = (uint8_t *)bp
;
560 seg
->fc_memget_cnt
++;
562 seg
->fc_memget_end
= (uint8_t *)bp
;
565 seg
->fc_total_memsize
+= seg
->fc_memsize
;
/* Return how many objects were actually added */
570 return ((seg
->fc_numblks
- fc_numblks
));
572 } /* emlxs_mem_pool_alloc() */
575 /* Must hold EMLXS_MEMGET_LOCK & EMLXS_MEMPUT_LOCK when calling */
/*
 * emlxs_mem_pool_free
 *
 * Shrinks a pool by up to 'count' objects.  First drains the memput
 * list into the memget list so all free objects are reachable, then
 * pops objects off the memget freelist and releases them: for DMA
 * pools, the stmf association (FCT tags), the DMA buffer, and the
 * MATCHMAP descriptor; for plain pools, the virtual buffer only.
 * fc_total_memsize is decremented accordingly.
 *
 * Caller must hold EMLXS_MEMGET_LOCK and EMLXS_MEMPUT_LOCK (per the
 * comment above this function in the original source).
 *
 * NOTE(review): this extracted view is missing interior source lines;
 * comments describe only the visible code.
 */
577 emlxs_mem_pool_free(emlxs_hba_t
*hba
, MEMSEG
*seg
, uint32_t count
)
579 emlxs_port_t
*port
= &PPORT
;
583 MBUF_INFO local_buf_info
;
/* Nothing to free for an empty or unconfigured pool */
585 if ((seg
->fc_memsize
== 0) ||
586 (seg
->fc_numblks
== 0) ||
591 /* Check max count */
592 if (count
> seg
->fc_numblks
) {
593 count
= seg
->fc_numblks
;
596 /* Move memput list to memget list */
597 if (seg
->fc_memput_ptr
) {
598 if (seg
->fc_memget_end
== NULL
) {
599 seg
->fc_memget_ptr
= seg
->fc_memput_ptr
;
601 *((uint8_t **)(seg
->fc_memget_end
)) =\
604 seg
->fc_memget_end
= seg
->fc_memput_end
;
605 seg
->fc_memget_cnt
+= seg
->fc_memput_cnt
;
607 seg
->fc_memput_ptr
= NULL
;
608 seg
->fc_memput_end
= NULL
;
609 seg
->fc_memput_cnt
= 0;
612 buf_info
= &local_buf_info
;
614 /* Check for final deallocation */
615 if (!(seg
->fc_memflag
& FC_MEMSEG_GET_ENABLED
)) {
616 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_detail_msg
,
617 "%s free:%d n=%d s=%d f=%x l=%d,%d,%d "
619 seg
->fc_label
, count
, seg
->fc_numblks
,
620 seg
->fc_memsize
, seg
->fc_memflag
, seg
->fc_lo_water
,
621 seg
->fc_hi_water
, seg
->fc_step
, seg
->fc_memget_cnt
,
/* Non-DMA pools are handled by the second loop below */
625 if (!(seg
->fc_memflag
& FC_MBUF_DMA
)) {
631 /* Free memory associated with all buffers on get buffer pool */
632 while (count
&& ((bp
= seg
->fc_memget_ptr
) != NULL
)) {
633 /* Remove buffer from list */
634 if (seg
->fc_memget_end
== bp
) {
635 seg
->fc_memget_ptr
= NULL
;
636 seg
->fc_memget_end
= NULL
;
637 seg
->fc_memget_cnt
= 0;
640 seg
->fc_memget_ptr
= *((uint8_t **)bp
);
641 seg
->fc_memget_cnt
--;
/* FCT-tagged objects carry an stmf association to release first */
646 if (mp
->tag
>= MEM_FCTSEG
) {
647 emlxs_fct_stmf_free(hba
, mp
);
649 #endif /* SFCT_SUPPORT */
651 /* Free the DMA memory itself */
652 bzero(buf_info
, sizeof (MBUF_INFO
));
653 buf_info
->size
= mp
->size
;
654 buf_info
->virt
= mp
->virt
;
655 buf_info
->phys
= mp
->phys
;
656 buf_info
->dma_handle
= mp
->dma_handle
;
657 buf_info
->data_handle
= mp
->data_handle
;
658 buf_info
->flags
= seg
->fc_memflag
;
659 emlxs_mem_free(hba
, buf_info
);
661 /* Free the handle */
662 bzero(buf_info
, sizeof (MBUF_INFO
));
663 buf_info
->size
= sizeof (MATCHMAP
);
664 buf_info
->virt
= (void *)mp
;
665 emlxs_mem_free(hba
, buf_info
);
/* Undo the accounting done in emlxs_mem_pool_alloc() */
668 seg
->fc_total_memsize
-= (seg
->fc_memsize
+ sizeof (MATCHMAP
));
677 /* Free memory associated with all buffers on get buffer pool */
678 while (count
&& ((bp
= seg
->fc_memget_ptr
) != NULL
)) {
679 /* Remove buffer from list */
680 if (seg
->fc_memget_end
== bp
) {
681 seg
->fc_memget_ptr
= NULL
;
682 seg
->fc_memget_end
= NULL
;
683 seg
->fc_memget_cnt
= 0;
686 seg
->fc_memget_ptr
= *((uint8_t **)bp
);
687 seg
->fc_memget_cnt
--;
690 /* Free the Virtual memory itself */
691 bzero(buf_info
, sizeof (MBUF_INFO
));
692 buf_info
->size
= seg
->fc_memsize
;
694 emlxs_mem_free(hba
, buf_info
);
697 seg
->fc_total_memsize
-= seg
->fc_memsize
;
704 } /* emlxs_mem_pool_free() */
/*
 * emlxs_mem_pool_create
 *
 * Validates a MEMSEG's water marks, marks the pool dynamic when
 * lo != hi (forcing lo = hi when CFG_MEM_DYNAMIC is disabled),
 * performs the initial allocation up to fc_lo_water, enables GET/PUT
 * on the pool, and returns the resulting fc_numblks.  Both pool locks
 * are taken in GET-then-PUT order for the duration.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (e.g. the return in the fc_memsize == 0 branch and the bodies of the
 * fc_step checks); comments describe only the visible code.
 */
708 emlxs_mem_pool_create(emlxs_hba_t
*hba
, MEMSEG
*seg
)
710 emlxs_config_t
*cfg
= &CFG
;
712 mutex_enter(&EMLXS_MEMGET_LOCK
);
713 mutex_enter(&EMLXS_MEMPUT_LOCK
);
/* Unconfigured pool: drop locks (exit order mirrors entry) */
715 if (seg
->fc_memsize
== 0) {
716 mutex_exit(&EMLXS_MEMPUT_LOCK
);
717 mutex_exit(&EMLXS_MEMGET_LOCK
);
722 /* Sanity check hi > lo */
723 if (seg
->fc_lo_water
> seg
->fc_hi_water
) {
724 seg
->fc_hi_water
= seg
->fc_lo_water
;
727 /* If dynamic pools are disabled, then force pool to max level */
728 if (cfg
[CFG_MEM_DYNAMIC
].current
== 0) {
729 seg
->fc_lo_water
= seg
->fc_hi_water
;
732 /* If pool is dynamic, then fc_step must be >0 */
733 /* Otherwise, fc_step must be 0 */
734 if (seg
->fc_lo_water
!= seg
->fc_hi_water
) {
735 seg
->fc_memflag
|= FC_MEMSEG_DYNAMIC
;
737 if (seg
->fc_step
== 0) {
745 seg
->fc_total_memsize
= 0;
/* Pre-fill the pool to its low-water mark */
748 (void) emlxs_mem_pool_alloc(hba
, seg
, seg
->fc_lo_water
);
750 seg
->fc_memflag
|= (FC_MEMSEG_PUT_ENABLED
|FC_MEMSEG_GET_ENABLED
);
752 mutex_exit(&EMLXS_MEMPUT_LOCK
);
753 mutex_exit(&EMLXS_MEMGET_LOCK
);
755 return (seg
->fc_numblks
);
757 } /* emlxs_mem_pool_create() */
/*
 * emlxs_mem_pool_destroy
 *
 * Disables GET on the pool (PUT intentionally stays enabled so
 * in-flight objects can still be returned — see emlxs_mem_pool_put's
 * late-PUT handling), frees all objects via emlxs_mem_pool_free(),
 * logs a leak warning if any objects remain outstanding, and zeroes
 * the MEMSEG.  Both pool locks are held for the duration.
 *
 * NOTE(review): this extracted view is missing interior source lines;
 * comments describe only the visible code.
 */
761 emlxs_mem_pool_destroy(emlxs_hba_t
*hba
, MEMSEG
*seg
)
763 emlxs_port_t
*port
= &PPORT
;
765 mutex_enter(&EMLXS_MEMGET_LOCK
);
766 mutex_enter(&EMLXS_MEMPUT_LOCK
);
768 if (seg
->fc_memsize
== 0) {
769 mutex_exit(&EMLXS_MEMPUT_LOCK
);
770 mutex_exit(&EMLXS_MEMGET_LOCK
);
774 /* Leave FC_MEMSEG_PUT_ENABLED set for now */
775 seg
->fc_memflag
&= ~FC_MEMSEG_GET_ENABLED
;
777 /* Try to free all objects */
778 emlxs_mem_pool_free(hba
, seg
, seg
->fc_numblks
);
/* Any remaining blocks are still checked out by callers: report leak */
780 if (seg
->fc_numblks
) {
781 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_detail_msg
,
782 "mem_pool_destroy: %s leak detected: "
783 "%d objects still allocated.",
784 seg
->fc_label
, seg
->fc_numblks
);
786 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_detail_msg
,
787 "mem_pool_destroy: %s destroyed.",
791 bzero(seg
, sizeof (MEMSEG
));
794 mutex_exit(&EMLXS_MEMPUT_LOCK
);
795 mutex_exit(&EMLXS_MEMGET_LOCK
);
799 } /* emlxs_mem_pool_destroy() */
/*
 * emlxs_mem_pool_clean
 *
 * Periodic trimmer for dynamic pools: uses fc_low (the minimum free
 * count observed since the last clean) to decide how many surplus
 * objects can be released, keeps a ~3% free pad (minimum of 1 implied
 * by the visible comment), never shrinks below fc_lo_water, then frees
 * the computed count and logs the size change.  Both pool locks are
 * held for the duration.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (early-return bodies, the small-pool free_pad branch); comments
 * describe only the visible code.
 */
803 emlxs_mem_pool_clean(emlxs_hba_t
*hba
, MEMSEG
*seg
)
805 emlxs_port_t
*port
= &PPORT
;
806 uint32_t clean_count
;
810 mutex_enter(&EMLXS_MEMGET_LOCK
);
811 mutex_enter(&EMLXS_MEMPUT_LOCK
);
/* Only dynamic pools are ever trimmed */
813 if (!(seg
->fc_memflag
& FC_MEMSEG_DYNAMIC
)) {
814 mutex_exit(&EMLXS_MEMPUT_LOCK
);
815 mutex_exit(&EMLXS_MEMGET_LOCK
);
819 if (!(seg
->fc_memflag
& FC_MEMSEG_GET_ENABLED
)) {
823 #ifdef EMLXS_POOL_DEBUG
824 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_detail_msg
,
825 "%s clean: n=%d s=%d f=%x l=%d,%d,%d "
827 seg
->fc_label
, seg
->fc_numblks
,
828 seg
->fc_memsize
, seg
->fc_memflag
, seg
->fc_lo_water
,
829 seg
->fc_hi_water
, seg
->fc_step
, seg
->fc_memget_cnt
,
831 #endif /* EMLXS_POOL_DEBUG */
833 /* Calculatge current free count */
834 free_count
= (seg
->fc_memget_cnt
+ seg
->fc_memput_cnt
);
836 /* Reset fc_low value to current free count */
/* clean_count starts as the low-water-since-last-clean observation */
837 clean_count
= seg
->fc_low
;
838 seg
->fc_low
= free_count
;
840 /* Return if pool is already at lo water mark */
841 if (seg
->fc_numblks
<= seg
->fc_lo_water
) {
845 /* Return if there is nothing to clean */
846 if ((free_count
== 0) ||
847 (clean_count
<= 1)) {
851 /* Calculate a 3 percent free pad count (1 being minimum) */
852 if (seg
->fc_numblks
> 66) {
853 free_pad
= ((seg
->fc_numblks
* 3)/100);
858 /* Return if fc_low is below pool free pad */
859 if (clean_count
<= free_pad
) {
863 clean_count
-= free_pad
;
865 /* clean_count can't exceed minimum pool levels */
866 if (clean_count
> (seg
->fc_numblks
- seg
->fc_lo_water
)) {
867 clean_count
= (seg
->fc_numblks
- seg
->fc_lo_water
);
870 emlxs_mem_pool_free(hba
, seg
, clean_count
);
/* Log only when the pool size actually changed since last clean */
873 if (seg
->fc_last
!= seg
->fc_numblks
) {
874 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_detail_msg
,
875 "%s update: n=%d->%d s=%d f=%x l=%d,%d,%d "
877 seg
->fc_label
, seg
->fc_last
, seg
->fc_numblks
,
878 seg
->fc_memsize
, seg
->fc_memflag
, seg
->fc_lo_water
,
879 seg
->fc_hi_water
, seg
->fc_step
, seg
->fc_memget_cnt
,
882 seg
->fc_last
= seg
->fc_numblks
;
885 mutex_exit(&EMLXS_MEMPUT_LOCK
);
886 mutex_exit(&EMLXS_MEMGET_LOCK
);
889 } /* emlxs_mem_pool_clean() */
/*
 * emlxs_mem_pool_get
 *
 * Pops one free object from a pool.  Order of attempts: the memget
 * list; then the memput list (migrated wholesale under
 * EMLXS_MEMPUT_LOCK); then, for dynamic pools, a fresh allocation of
 * fc_step objects.  Non-DMA objects are zeroed on the way out; DMA
 * objects get MAP_POOL_ALLOCATED set on their MATCHMAP.  Also tracks
 * fc_low, the minimum observed free count, for the pool cleaner.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (the NULL return on empty pool, the final return of bp); comments
 * describe only the visible code.
 */
893 emlxs_mem_pool_get(emlxs_hba_t
*hba
, MEMSEG
*seg
)
895 emlxs_port_t
*port
= &PPORT
;
900 mutex_enter(&EMLXS_MEMGET_LOCK
);
902 /* Check if memory pool is GET enabled */
903 if (!(seg
->fc_memflag
& FC_MEMSEG_GET_ENABLED
)) {
904 mutex_exit(&EMLXS_MEMGET_LOCK
);
908 /* If no entries on memget list, then check memput list */
909 if (!seg
->fc_memget_ptr
) {
910 mutex_enter(&EMLXS_MEMPUT_LOCK
);
911 if (seg
->fc_memput_ptr
) {
913 * Move list from memput to memget
915 seg
->fc_memget_ptr
= seg
->fc_memput_ptr
;
916 seg
->fc_memget_end
= seg
->fc_memput_end
;
917 seg
->fc_memget_cnt
= seg
->fc_memput_cnt
;
918 seg
->fc_memput_ptr
= NULL
;
919 seg
->fc_memput_end
= NULL
;
920 seg
->fc_memput_cnt
= 0;
922 mutex_exit(&EMLXS_MEMPUT_LOCK
);
925 /* If no entries on memget list, then pool is empty */
926 /* Try to allocate more if pool is dynamic */
927 if (!seg
->fc_memget_ptr
&&
928 (seg
->fc_memflag
& FC_MEMSEG_DYNAMIC
)) {
929 (void) emlxs_mem_pool_alloc(hba
, seg
, seg
->fc_step
);
933 /* If no entries on memget list, then pool is empty */
934 if (!seg
->fc_memget_ptr
) {
935 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_alloc_failed_msg
,
936 "%s empty.", seg
->fc_label
);
938 mutex_exit(&EMLXS_MEMGET_LOCK
);
942 /* Remove an entry from the get list */
943 bp
= seg
->fc_memget_ptr
;
945 if (seg
->fc_memget_end
== bp
) {
946 seg
->fc_memget_ptr
= NULL
;
947 seg
->fc_memget_end
= NULL
;
948 seg
->fc_memget_cnt
= 0;
/* Freelist link is stored in the object's first pointer-sized word */
951 seg
->fc_memget_ptr
= *((uint8_t **)bp
);
952 seg
->fc_memget_cnt
--;
955 /* Initialize buffer */
956 if (!(seg
->fc_memflag
& FC_MBUF_DMA
)) {
957 bzero(bp
, seg
->fc_memsize
);
961 mp
->flag
|= MAP_POOL_ALLOCATED
;
964 /* Set fc_low if pool is dynamic */
965 if (seg
->fc_memflag
& FC_MEMSEG_DYNAMIC
) {
966 free_count
= (seg
->fc_memget_cnt
+ seg
->fc_memput_cnt
);
967 if (free_count
< seg
->fc_low
) {
968 seg
->fc_low
= free_count
;
972 mutex_exit(&EMLXS_MEMGET_LOCK
);
976 } /* emlxs_mem_pool_get() */
/*
 * emlxs_mem_pool_put
 *
 * Returns an object to a pool's memput freelist.  Guards against
 * double-free (object already at the tail of either list) and, for DMA
 * pools, validates the MATCHMAP (MAP_POOL_ALLOCATED set and segment
 * match) — an invalid put forces FC_ERROR state and spawns the shutdown
 * thread.  If GET has been disabled by a prior emlxs_mem_pool_destroy()
 * (late put), the destroy is retried after the object is returned.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (returns after early exits, flag clearing); comments describe only
 * the visible code.
 */
980 emlxs_mem_pool_put(emlxs_hba_t
*hba
, MEMSEG
*seg
, void *bp
)
982 emlxs_port_t
*port
= &PPORT
;
985 /* Free the pool object */
986 mutex_enter(&EMLXS_MEMPUT_LOCK
);
988 /* Check if memory pool is PUT enabled */
989 if (!(seg
->fc_memflag
& FC_MEMSEG_PUT_ENABLED
)) {
990 mutex_exit(&EMLXS_MEMPUT_LOCK
);
994 /* Check if buffer was just freed */
995 if ((seg
->fc_memput_end
== bp
) || (seg
->fc_memget_end
== bp
)) {
996 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_error_msg
,
997 "%s: Freeing free object: bp=%p", seg
->fc_label
, bp
);
999 mutex_exit(&EMLXS_MEMPUT_LOCK
);
1003 /* Validate DMA buffer */
1004 if (seg
->fc_memflag
& FC_MBUF_DMA
) {
1005 mp
= (MATCHMAP
*)bp
;
1007 if (!(mp
->flag
& MAP_POOL_ALLOCATED
) ||
1008 (mp
->segment
!= seg
)) {
1009 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_error_msg
,
1010 "mem_pool_put: %s invalid: mp=%p " \
1011 "tag=0x%x flag=%x", seg
->fc_label
,
1012 mp
, mp
->tag
, mp
->flag
);
/* Corrupted pool object: take the adapter down */
1014 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
1016 mutex_exit(&EMLXS_MEMPUT_LOCK
);
1018 emlxs_thread_spawn(hba
, emlxs_shutdown_thread
,
1025 /* Release buffer to the end of the memput list */
1026 if (seg
->fc_memput_end
== NULL
) {
1027 seg
->fc_memput_ptr
= bp
;
1028 seg
->fc_memput_cnt
= 1;
1030 *((void **)(seg
->fc_memput_end
)) = bp
;
1031 seg
->fc_memput_cnt
++;
1033 seg
->fc_memput_end
= bp
;
/* Terminate the freelist link stored in the object itself */
1034 *((void **)(bp
)) = NULL
;
1036 mutex_exit(&EMLXS_MEMPUT_LOCK
);
1038 /* This is for late PUT's after an initial */
1039 /* emlxs_mem_pool_destroy call */
1040 if ((seg
->fc_memflag
& FC_MEMSEG_PUT_ENABLED
) &&
1041 !(seg
->fc_memflag
& FC_MEMSEG_GET_ENABLED
)) {
1042 emlxs_mem_pool_destroy(hba
, seg
);
1047 } /* emlxs_mem_pool_put() */
/*
 * emlxs_mem_buf_alloc
 *
 * Allocates a standalone (non-pooled) DMA buffer of 'size' bytes
 * wrapped in a MATCHMAP descriptor: first the descriptor (pointer
 * aligned), then the DMA payload (32-byte aligned, single SG).  On
 * DMA allocation failure the descriptor is freed again.  The result
 * is marked MAP_BUF_ALLOCATED so emlxs_mem_put()/emlxs_mem_buf_free()
 * can distinguish it from pool objects.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (the NULL-return paths and final return of mp); comments describe
 * only the visible code.
 */
1051 emlxs_mem_buf_alloc(emlxs_hba_t
*hba
, uint32_t size
)
1053 emlxs_port_t
*port
= &PPORT
;
1055 MATCHMAP
*mp
= NULL
;
1056 MBUF_INFO
*buf_info
;
1059 buf_info
= &bufinfo
;
/* Step 1: allocate the MATCHMAP descriptor */
1061 bzero(buf_info
, sizeof (MBUF_INFO
));
1062 buf_info
->size
= sizeof (MATCHMAP
);
1063 buf_info
->align
= sizeof (void *);
1065 (void) emlxs_mem_alloc(hba
, buf_info
);
1066 if (buf_info
->virt
== NULL
) {
1067 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mem_alloc_failed_msg
,
1068 "MEM_BUF_ALLOC buffer.");
1073 mp
= (MATCHMAP
*)buf_info
->virt
;
1074 bzero(mp
, sizeof (MATCHMAP
));
/* Step 2: allocate the DMA payload the descriptor will track */
1076 bzero(buf_info
, sizeof (MBUF_INFO
));
1077 buf_info
->size
= size
;
1078 buf_info
->flags
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
;
1079 buf_info
->align
= 32;
1081 (void) emlxs_mem_alloc(hba
, buf_info
);
1082 if (buf_info
->virt
== NULL
) {
1084 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mem_alloc_failed_msg
,
1085 "MEM_BUF_ALLOC DMA buffer.");
1087 /* Free the mp object */
1088 bzero(buf_info
, sizeof (MBUF_INFO
));
1089 buf_info
->size
= sizeof (MATCHMAP
);
1090 buf_info
->virt
= (void *)mp
;
1091 emlxs_mem_free(hba
, buf_info
);
1095 bp
= (uint8_t *)buf_info
->virt
;
1096 bzero(bp
, buf_info
->size
);
/* Record addresses and DDI handles in the descriptor */
1098 mp
->virt
= buf_info
->virt
;
1099 mp
->phys
= buf_info
->phys
;
1100 mp
->size
= buf_info
->size
;
1101 mp
->dma_handle
= buf_info
->dma_handle
;
1102 mp
->data_handle
= buf_info
->data_handle
;
1104 mp
->flag
|= MAP_BUF_ALLOCATED
;
1108 } /* emlxs_mem_buf_alloc() */
/*
 * emlxs_mem_buf_free
 *
 * Releases a buffer created by emlxs_mem_buf_alloc(): refuses anything
 * not marked MAP_BUF_ALLOCATED, then frees the DMA payload (using the
 * addresses/handles stored in the MATCHMAP) followed by the MATCHMAP
 * descriptor itself.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (the early-return body and any return value); comments describe only
 * the visible code.
 */
1112 emlxs_mem_buf_free(emlxs_hba_t
*hba
, MATCHMAP
*mp
)
1115 MBUF_INFO
*buf_info
;
1117 buf_info
= &bufinfo
;
/* Only standalone buffers may be freed here — pool objects go
 * back via emlxs_mem_pool_put() */
1119 if (!(mp
->flag
& MAP_BUF_ALLOCATED
)) {
/* Free the DMA payload */
1123 bzero(buf_info
, sizeof (MBUF_INFO
));
1124 buf_info
->size
= mp
->size
;
1125 buf_info
->virt
= mp
->virt
;
1126 buf_info
->phys
= mp
->phys
;
1127 buf_info
->dma_handle
= mp
->dma_handle
;
1128 buf_info
->data_handle
= mp
->data_handle
;
1129 buf_info
->flags
= FC_MBUF_DMA
;
1130 emlxs_mem_free(hba
, buf_info
);
/* Free the MATCHMAP descriptor */
1132 bzero(buf_info
, sizeof (MBUF_INFO
));
1133 buf_info
->size
= sizeof (MATCHMAP
);
1134 buf_info
->virt
= (void *)mp
;
1135 emlxs_mem_free(hba
, buf_info
);
1139 } /* emlxs_mem_buf_free() */
/*
 * emlxs_mem_get
 *
 * Public pool-get entry point: validates seg_id against FC_MAX_SEG,
 * fetches an object from the matching hba->memseg[] pool via
 * emlxs_mem_pool_get(), and tags the object with the per-type
 * "came from a pool" flag (MBQ_/IOCB_/NODE_POOL_ALLOCATED) so
 * emlxs_mem_put() can validate returns.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (the switch on seg_id and the final return of bp are implied but not
 * visible); comments describe only the visible code.
 */
1143 emlxs_mem_get(emlxs_hba_t
*hba
, uint32_t seg_id
)
1145 emlxs_port_t
*port
= &PPORT
;
1152 if (seg_id
>= FC_MAX_SEG
) {
1154 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_error_msg
,
1155 "mem_get: Invalid segment id = %d",
1160 seg
= &hba
->memseg
[seg_id
];
1162 /* Alloc a buffer from the pool */
1163 bp
= emlxs_mem_pool_get(hba
, seg
);
/* Per-type tagging of the freshly issued object */
1168 mbq
= (MAILBOXQ
*)bp
;
1169 mbq
->flag
|= MBQ_POOL_ALLOCATED
;
1173 iocbq
= (IOCBQ
*)bp
;
1174 iocbq
->flag
|= IOCB_POOL_ALLOCATED
;
1178 node
= (NODELIST
*)bp
;
1179 node
->flag
|= NODE_POOL_ALLOCATED
;
1186 } /* emlxs_mem_get() */
/*
 * emlxs_mem_put
 *
 * Public pool-put entry point: validates seg_id, then checks the
 * per-type pool-allocated flag (MBQ_/IOCB_/NODE_/MAP_POOL_ALLOCATED)
 * before returning the object to its pool via emlxs_mem_pool_put().
 * Standalone MAP_BUF_ALLOCATED buffers are routed to
 * emlxs_mem_buf_free() instead, and MAP_TABLE_ALLOCATED (bpl_table)
 * entries are treated specially.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (the switch on seg_id, error returns, flag clearing); comments
 * describe only the visible code.
 */
1190 emlxs_mem_put(emlxs_hba_t
*hba
, uint32_t seg_id
, void *bp
)
1192 emlxs_port_t
*port
= &PPORT
;
1199 if (seg_id
>= FC_MAX_SEG
) {
1201 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_error_msg
,
1202 "mem_put: Invalid segment id = %d: bp=%p",
1207 seg
= &hba
->memseg
[seg_id
];
/* Mailbox objects: verify they were issued by this pool */
1212 mbq
= (MAILBOXQ
*)bp
;
1214 if (!(mbq
->flag
& MBQ_POOL_ALLOCATED
)) {
/* IOCB objects */
1220 iocbq
= (IOCBQ
*)bp
;
1222 if (!(iocbq
->flag
& IOCB_POOL_ALLOCATED
)) {
1226 /* Any IOCBQ with a packet attached did not come */
/* Node objects */
1234 node
= (NODELIST
*)bp
;
1236 if (!(node
->flag
& NODE_POOL_ALLOCATED
)) {
/* DMA objects: standalone buffers are freed, table entries and
 * non-pool objects are rejected */
1242 mp
= (MATCHMAP
*)bp
;
1244 if (mp
->flag
& MAP_BUF_ALLOCATED
) {
1245 emlxs_mem_buf_free(hba
, mp
);
1249 if (mp
->flag
& MAP_TABLE_ALLOCATED
) {
1253 if (!(mp
->flag
& MAP_POOL_ALLOCATED
)) {
1259 /* Free a buffer to the pool */
1260 emlxs_mem_pool_put(hba
, seg
, bp
);
1264 } /* emlxs_mem_put() */
1268 * Look up the virtual address given a mapped address
/*
 * emlxs_mem_get_vaddr
 *
 * Looks up the MATCHMAP whose DMA (physical) address equals 'mapbp' on
 * the given ring's posted-buffer list (fc_mpoff = head, fc_mpon = tail,
 * linked through fc_mptr).  The four branches — ELS, CT, IP, and
 * (under SFCT_SUPPORT) FCT — are structurally identical: walk the list,
 * unlink the match (fixing head/prev/tail), DMA-sync the buffer
 * FORKERNEL, and decrement the matching HBASTATS *UbPosted counter.
 * Logs a "not mapped" error when no match is found.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (loop headers, the 'prev' tracking updates, return statements);
 * comments describe only the visible code.
 */
1272 emlxs_mem_get_vaddr(emlxs_hba_t
*hba
, RING
*rp
, uint64_t mapbp
)
1274 emlxs_port_t
*port
= &PPORT
;
/* --- ELS ring list --- */
1278 if (rp
->ringno
== hba
->channel_els
) {
1279 mp
= (MATCHMAP
*)rp
->fc_mpoff
;
1283 if (mp
->phys
== mapbp
) {
/* Unlink: head case, middle case (prev), and tail fix-up */
1285 rp
->fc_mpoff
= mp
->fc_mptr
;
1287 prev
->fc_mptr
= mp
->fc_mptr
;
1290 if (rp
->fc_mpon
== mp
) {
1291 rp
->fc_mpon
= (void *)prev
;
1296 EMLXS_MPDATA_SYNC(mp
->dma_handle
, 0, mp
->size
,
1297 DDI_DMA_SYNC_FORKERNEL
);
1299 HBASTATS
.ElsUbPosted
--;
/* Advance to next list entry */
1305 mp
= (MATCHMAP
*)mp
->fc_mptr
;
1308 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_error_msg
,
1309 "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1310 mapbp
, rp
->ringno
, rp
->fc_mpoff
, rp
->fc_mpon
);
/* --- CT ring list (same pattern) --- */
1312 } else if (rp
->ringno
== hba
->channel_ct
) {
1314 mp
= (MATCHMAP
*)rp
->fc_mpoff
;
1318 if (mp
->phys
== mapbp
) {
1320 rp
->fc_mpoff
= mp
->fc_mptr
;
1322 prev
->fc_mptr
= mp
->fc_mptr
;
1325 if (rp
->fc_mpon
== mp
) {
1326 rp
->fc_mpon
= (void *)prev
;
1331 EMLXS_MPDATA_SYNC(mp
->dma_handle
, 0, mp
->size
,
1332 DDI_DMA_SYNC_FORKERNEL
);
1334 HBASTATS
.CtUbPosted
--;
1340 mp
= (MATCHMAP
*)mp
->fc_mptr
;
1343 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_error_msg
,
1344 "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1345 mapbp
, rp
->ringno
, rp
->fc_mpoff
, rp
->fc_mpon
);
/* --- IP ring list (same pattern) --- */
1347 } else if (rp
->ringno
== hba
->channel_ip
) {
1349 mp
= (MATCHMAP
*)rp
->fc_mpoff
;
1353 if (mp
->phys
== mapbp
) {
1355 rp
->fc_mpoff
= mp
->fc_mptr
;
1357 prev
->fc_mptr
= mp
->fc_mptr
;
1360 if (rp
->fc_mpon
== mp
) {
1361 rp
->fc_mpon
= (void *)prev
;
1366 EMLXS_MPDATA_SYNC(mp
->dma_handle
, 0, mp
->size
,
1367 DDI_DMA_SYNC_FORKERNEL
);
1369 HBASTATS
.IpUbPosted
--;
1375 mp
= (MATCHMAP
*)mp
->fc_mptr
;
1378 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_error_msg
,
1379 "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1380 mapbp
, rp
->ringno
, rp
->fc_mpoff
, rp
->fc_mpon
);
/* --- FCT ring list (SFCT_SUPPORT only, same pattern) --- */
1383 } else if (rp
->ringno
== hba
->CHANNEL_FCT
) {
1384 mp
= (MATCHMAP
*)rp
->fc_mpoff
;
1388 if (mp
->phys
== mapbp
) {
1390 rp
->fc_mpoff
= mp
->fc_mptr
;
1392 prev
->fc_mptr
= mp
->fc_mptr
;
1395 if (rp
->fc_mpon
== mp
) {
1396 rp
->fc_mpon
= (void *)prev
;
1401 EMLXS_MPDATA_SYNC(mp
->dma_handle
, 0, mp
->size
,
1402 DDI_DMA_SYNC_FORKERNEL
);
1404 HBASTATS
.FctUbPosted
--;
1410 mp
= (MATCHMAP
*)mp
->fc_mptr
;
1413 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pool_error_msg
,
1414 "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1415 mapbp
, rp
->ringno
, rp
->fc_mpoff
, rp
->fc_mpon
);
1417 #endif /* SFCT_SUPPORT */
1422 } /* emlxs_mem_get_vaddr() */
1426 * Given a virtual address bp, generate the physical mapped address and
1427 * place it where addr points to. Save the address pair for lookup later.
/*
 * emlxs_mem_map_vaddr
 *
 * Registers a MATCHMAP buffer on the given ring's posted-buffer list
 * (appending mp at the tail: fc_mpoff = head, fc_mpon = tail, linked
 * via fc_mptr) and writes the buffer's physical address out through
 * *haddr/*laddr using PADDR_HI/PADDR_LO — both halves in SLIM2 mode,
 * low half only otherwise.  The four branches — ELS, CT, IP, and
 * (under SFCT_SUPPORT) FCT — are structurally identical and each
 * increments the matching HBASTATS *UbPosted counter.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (else-branch closers, the right-hand side of the tail-link
 * assignments); comments describe only the visible code.
 */
1431 emlxs_mem_map_vaddr(emlxs_hba_t
*hba
, RING
*rp
, MATCHMAP
*mp
,
1432 uint32_t *haddr
, uint32_t *laddr
)
/* --- ELS ring --- */
1434 if (rp
->ringno
== hba
->channel_els
) {
1436 * Update slot fc_mpon points to then bump it
1437 * fc_mpoff is pointer head of the list.
1438 * fc_mpon is pointer tail of the list.
1441 if (rp
->fc_mpoff
== 0) {
1442 rp
->fc_mpoff
= (void *)mp
;
1443 rp
->fc_mpon
= (void *)mp
;
1445 ((MATCHMAP
*)(rp
->fc_mpon
))->fc_mptr
=
1447 rp
->fc_mpon
= (void *)mp
;
1450 if (hba
->flag
& FC_SLIM2_MODE
) {
1452 /* return mapped address */
1453 *haddr
= PADDR_HI(mp
->phys
);
1454 /* return mapped address */
1455 *laddr
= PADDR_LO(mp
->phys
);
1457 /* return mapped address */
1458 *laddr
= PADDR_LO(mp
->phys
);
1461 HBASTATS
.ElsUbPosted
++;
/* --- CT ring (same pattern) --- */
1463 } else if (rp
->ringno
== hba
->channel_ct
) {
1465 * Update slot fc_mpon points to then bump it
1466 * fc_mpoff is pointer head of the list.
1467 * fc_mpon is pointer tail of the list.
1470 if (rp
->fc_mpoff
== 0) {
1471 rp
->fc_mpoff
= (void *)mp
;
1472 rp
->fc_mpon
= (void *)mp
;
1474 ((MATCHMAP
*)(rp
->fc_mpon
))->fc_mptr
=
1476 rp
->fc_mpon
= (void *)mp
;
1479 if (hba
->flag
& FC_SLIM2_MODE
) {
1480 /* return mapped address */
1481 *haddr
= PADDR_HI(mp
->phys
);
1482 /* return mapped address */
1483 *laddr
= PADDR_LO(mp
->phys
);
1485 /* return mapped address */
1486 *laddr
= PADDR_LO(mp
->phys
);
1489 HBASTATS
.CtUbPosted
++;
/* --- IP ring (same pattern) --- */
1492 } else if (rp
->ringno
== hba
->channel_ip
) {
1494 * Update slot fc_mpon points to then bump it
1495 * fc_mpoff is pointer head of the list.
1496 * fc_mpon is pointer tail of the list.
1499 if (rp
->fc_mpoff
== 0) {
1500 rp
->fc_mpoff
= (void *)mp
;
1501 rp
->fc_mpon
= (void *)mp
;
1503 ((MATCHMAP
*)(rp
->fc_mpon
))->fc_mptr
=
1505 rp
->fc_mpon
= (void *)mp
;
1508 if (hba
->flag
& FC_SLIM2_MODE
) {
1509 /* return mapped address */
1510 *haddr
= PADDR_HI(mp
->phys
);
1511 *laddr
= PADDR_LO(mp
->phys
);
1513 *laddr
= PADDR_LO(mp
->phys
);
1516 HBASTATS
.IpUbPosted
++;
/* --- FCT ring (SFCT_SUPPORT only, same pattern) --- */
1520 } else if (rp
->ringno
== hba
->CHANNEL_FCT
) {
1522 * Update slot fc_mpon points to then bump it
1523 * fc_mpoff is pointer head of the list.
1524 * fc_mpon is pointer tail of the list.
1527 if (rp
->fc_mpoff
== 0) {
1528 rp
->fc_mpoff
= (void *)mp
;
1529 rp
->fc_mpon
= (void *)mp
;
1531 ((MATCHMAP
*)(rp
->fc_mpon
))->fc_mptr
=
1533 rp
->fc_mpon
= (void *)mp
;
1536 if (hba
->flag
& FC_SLIM2_MODE
) {
1537 /* return mapped address */
1538 *haddr
= PADDR_HI(mp
->phys
);
1539 /* return mapped address */
1540 *laddr
= PADDR_LO(mp
->phys
);
1542 /* return mapped address */
1543 *laddr
= PADDR_LO(mp
->phys
);
1546 HBASTATS
.FctUbPosted
++;
1548 #endif /* SFCT_SUPPORT */
1550 } /* emlxs_mem_map_vaddr() */
/*
 * emlxs_hbq_alloc
 *
 * Lazily allocates the host buffer queue (HBQ) entry array for the
 * given hbq_id: if the HBQ's host buffer has not been allocated yet
 * (virt == 0), it allocates a page-aligned (4096) DMA region of
 * HBQ_numEntries * sizeof (HBQE_t), records the DDI handles and
 * addresses into hbq->HBQ_host_buf, tags it with hbq_id, and zeroes it.
 *
 * NOTE(review): this extracted view is missing interior source lines
 * (the failure return and the function's return value); comments
 * describe only the visible code.
 */
1555 emlxs_hbq_alloc(emlxs_hba_t
*hba
, uint32_t hbq_id
)
1557 emlxs_port_t
*port
= &PPORT
;
1559 MBUF_INFO
*buf_info
;
1562 hbq
= &hba
->sli
.sli3
.hbq_table
[hbq_id
];
/* Allocate only once per HBQ */
1564 if (hbq
->HBQ_host_buf
.virt
== 0) {
1565 buf_info
= &bufinfo
;
1567 /* Get the system's page size in a DDI-compliant way. */
1568 bzero(buf_info
, sizeof (MBUF_INFO
));
1569 buf_info
->size
= hbq
->HBQ_numEntries
* sizeof (HBQE_t
);
1570 buf_info
->flags
= FC_MBUF_DMA
;
1571 buf_info
->align
= 4096;
1573 (void) emlxs_mem_alloc(hba
, buf_info
);
1575 if (buf_info
->virt
== NULL
) {
1576 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mem_alloc_msg
,
1577 "Unable to alloc HBQ.");
/* Record the DMA region in the HBQ descriptor */
1581 hbq
->HBQ_host_buf
.virt
= buf_info
->virt
;
1582 hbq
->HBQ_host_buf
.phys
= buf_info
->phys
;
1583 hbq
->HBQ_host_buf
.data_handle
= buf_info
->data_handle
;
1584 hbq
->HBQ_host_buf
.dma_handle
= buf_info
->dma_handle
;
1585 hbq
->HBQ_host_buf
.size
= buf_info
->size
;
1586 hbq
->HBQ_host_buf
.tag
= hbq_id
;
1588 bzero((char *)hbq
->HBQ_host_buf
.virt
, buf_info
->size
);
1593 } /* emlxs_hbq_alloc() */