15391 Add support for Emulex LPe35000/LPe36000 32Gb/64Gb fibre channel chipsets
[illumos-gate.git] / usr / src / uts / common / io / fibre-channel / fca / emlxs / emlxs_mem.c
bloba1eea0434fbb162b90cabb4b83dcef7779a15b31
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2004-2011 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26 * Copyright 2020 RackTop Systems, Inc.
29 #include <emlxs.h>
31 /* #define EMLXS_POOL_DEBUG */
33 EMLXS_MSG_DEF(EMLXS_MEM_C);
36 static uint32_t emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg,
37 uint32_t count);
38 static void emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count);
/*
 * emlxs_mem_alloc_buffer
 *
 * Adapter-attach memory setup.  Allocates the fc_table (one emlxs_buf_t
 * pointer per iotag) and then creates each driver memory pool in
 * hba->memseg[]: node, IOCB, mailbox, BPL and unsolicited
 * ELS/IP/CT/SGL/FCT buffer pools.  For SLI3 adapters whose MEM_BPL pool
 * is static it also pre-populates a bpl_table with one MEM_BPL object
 * per iotag for fast lookup.
 *
 * Returns 1 on success; 0 on failure, after releasing any partial
 * allocations via emlxs_mem_free_buffer().
 */
41 extern int32_t
42 emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
44 emlxs_port_t *port = &PPORT;
45 emlxs_config_t *cfg;
46 MBUF_INFO *buf_info;
47 MEMSEG *seg;
48 MBUF_INFO bufinfo;
49 int32_t i;
50 MATCHMAP *mp;
51 MATCHMAP **bpl_table;
53 buf_info = &bufinfo;
54 cfg = &CFG;
56 bzero(hba->memseg, sizeof (hba->memseg));
58 /* Allocate the fc_table */
59 bzero(buf_info, sizeof (MBUF_INFO));
60 buf_info->size = (hba->max_iotag * sizeof (emlxs_buf_t *));
62 (void) emlxs_mem_alloc(hba, buf_info);
63 if (buf_info->virt == NULL) {
65 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
66 "fc_table buffer.");
68 goto failed;
70 hba->fc_table = buf_info->virt;
71 bzero(hba->fc_table, buf_info->size);
/*
 * Describe each pool (label, object size, water marks, growth step,
 * DMA attributes) and then create it.  Pools not applicable to this
 * configuration are skipped with "continue".
 */
73 /* Prepare the memory pools */
74 for (i = 0; i < FC_MAX_SEG; i++) {
75 seg = &hba->memseg[i];
77 switch (i) {
78 case MEM_NLP:
79 (void) strlcpy(seg->fc_label, "Node Pool",
80 sizeof (seg->fc_label));
81 seg->fc_memtag = MEM_NLP;
82 seg->fc_memsize = sizeof (NODELIST);
83 seg->fc_hi_water = hba->max_nodes + 2;
84 seg->fc_lo_water = 2;
85 seg->fc_step = 1;
86 break;
88 case MEM_IOCB:
89 (void) strlcpy(seg->fc_label, "IOCB Pool",
90 sizeof (seg->fc_label));
91 seg->fc_memtag = MEM_IOCB;
92 seg->fc_memsize = sizeof (IOCBQ);
93 seg->fc_hi_water = cfg[CFG_NUM_IOCBS].current;
94 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
95 seg->fc_step = cfg[CFG_NUM_IOCBS].low;
96 break;
98 case MEM_MBOX:
99 (void) strlcpy(seg->fc_label, "MBOX Pool",
100 sizeof (seg->fc_label));
101 seg->fc_memtag = MEM_MBOX;
102 seg->fc_memsize = sizeof (MAILBOXQ);
103 seg->fc_hi_water = hba->max_nodes + 32;
104 seg->fc_lo_water = 32;
105 seg->fc_step = 1;
106 break;
108 case MEM_BPL:
/* BPLs are only used on SLI3; SLI4 uses SGL pools instead */
109 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
110 continue;
112 (void) strlcpy(seg->fc_label, "BPL Pool",
113 sizeof (seg->fc_label));
114 seg->fc_memtag = MEM_BPL;
115 seg->fc_memsize = hba->sli.sli3.mem_bpl_size;
116 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
117 seg->fc_memalign = 32;
118 seg->fc_hi_water = hba->max_iotag;
119 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
120 seg->fc_step = cfg[CFG_NUM_IOCBS].low;
121 break;
123 case MEM_BUF:
124 /* These are the unsolicited ELS buffers. */
125 (void) strlcpy(seg->fc_label, "BUF Pool",
126 sizeof (seg->fc_label));
127 seg->fc_memtag = MEM_BUF;
128 seg->fc_memsize = MEM_BUF_SIZE;
129 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
130 seg->fc_memalign = 32;
131 seg->fc_hi_water = MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
132 seg->fc_lo_water = MEM_ELSBUF_COUNT;
133 seg->fc_step = 1;
134 break;
136 case MEM_IPBUF:
137 /* These are the unsolicited IP buffers. */
138 if (cfg[CFG_NETWORK_ON].current == 0) {
139 continue;
142 (void) strlcpy(seg->fc_label, "IPBUF Pool",
143 sizeof (seg->fc_label));
144 seg->fc_memtag = MEM_IPBUF;
145 seg->fc_memsize = MEM_IPBUF_SIZE;
146 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
147 seg->fc_memalign = 32;
148 seg->fc_hi_water = MEM_IPBUF_COUNT;
149 seg->fc_lo_water = 0;
150 seg->fc_step = 4;
151 break;
153 case MEM_CTBUF:
154 /* These are the unsolicited CT buffers. */
155 (void) strlcpy(seg->fc_label, "CTBUF Pool",
156 sizeof (seg->fc_label));
157 seg->fc_memtag = MEM_CTBUF;
158 seg->fc_memsize = MEM_CTBUF_SIZE;
159 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
160 seg->fc_memalign = 32;
161 seg->fc_hi_water = MEM_CTBUF_COUNT;
162 seg->fc_lo_water = MEM_CTBUF_COUNT;
163 seg->fc_step = 1;
164 break;
166 case MEM_SGL1K:
167 (void) strlcpy(seg->fc_label, "1K SGL Pool",
168 sizeof (seg->fc_label));
169 seg->fc_memtag = MEM_SGL1K;
170 seg->fc_memsize = 0x400;
171 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
172 seg->fc_memalign = 32;
173 seg->fc_hi_water = 0x5000;
174 seg->fc_lo_water = 0;
175 seg->fc_step = 0x100;
176 break;
178 case MEM_SGL2K:
179 (void) strlcpy(seg->fc_label, "2K SGL Pool",
180 sizeof (seg->fc_label));
181 seg->fc_memtag = MEM_SGL2K;
182 seg->fc_memsize = 0x800;
183 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
184 seg->fc_memalign = 32;
185 seg->fc_hi_water = 0x5000;
186 seg->fc_lo_water = 0;
187 seg->fc_step = 0x100;
188 break;
190 case MEM_SGL4K:
191 (void) strlcpy(seg->fc_label, "4K SGL Pool",
192 sizeof (seg->fc_label));
193 seg->fc_memtag = MEM_SGL4K;
194 seg->fc_memsize = 0x1000;
195 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
196 seg->fc_memalign = 32;
197 seg->fc_hi_water = 0x5000;
198 seg->fc_lo_water = 0;
199 seg->fc_step = 0x100;
200 break;
202 #ifdef SFCT_SUPPORT
203 case MEM_FCTBUF:
204 /* These are the unsolicited FCT buffers. */
205 if (!(port->flag & EMLXS_TGT_ENABLED)) {
206 continue;
209 (void) strlcpy(seg->fc_label, "FCTBUF Pool",
210 sizeof (seg->fc_label));
211 seg->fc_memtag = MEM_FCTBUF;
212 seg->fc_memsize = MEM_FCTBUF_SIZE;
213 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
214 seg->fc_memalign = 32;
215 seg->fc_hi_water = MEM_FCTBUF_COUNT;
216 seg->fc_lo_water = 0;
217 seg->fc_step = 8;
218 break;
219 #endif /* SFCT_SUPPORT */
221 default:
222 continue;
225 if (seg->fc_memsize == 0) {
226 continue;
229 (void) emlxs_mem_pool_create(hba, seg);
/* A pool that could not reach its low-water mark is a fatal attach error */
231 if (seg->fc_numblks < seg->fc_lo_water) {
232 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
233 "%s: count=%d size=%d flags=%x lo=%d hi=%d",
234 seg->fc_label, seg->fc_numblks,
235 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
236 seg->fc_hi_water);
238 goto failed;
242 hba->sli.sli3.bpl_table = NULL;
243 seg = &hba->memseg[MEM_BPL];
245 /* If SLI3 and MEM_BPL pool is static */
246 if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK) &&
247 !(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
249 * Allocate and Initialize bpl_table
250 * This is for increased performance.
252 bzero(buf_info, sizeof (MBUF_INFO));
253 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
255 (void) emlxs_mem_alloc(hba, buf_info);
256 if (buf_info->virt == NULL) {
258 EMLXS_MSGF(EMLXS_CONTEXT,
259 &emlxs_mem_alloc_failed_msg,
260 "BPL table buffer.");
262 goto failed;
264 hba->sli.sli3.bpl_table = buf_info->virt;
266 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
267 for (i = 0; i < hba->max_iotag; i++) {
/*
 * NOTE(review): emlxs_mem_get() result is used without a NULL
 * check.  The static MEM_BPL pool was created with
 * hi_water == max_iotag and verified against fc_lo_water above,
 * which presumably guarantees max_iotag objects exist — confirm.
 */
268 mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
269 mp->flag |= MAP_TABLE_ALLOCATED;
270 bpl_table[i] = mp;
274 return (1);
276 failed:
278 (void) emlxs_mem_free_buffer(hba);
279 return (0);
281 } /* emlxs_mem_alloc_buffer() */
285 * emlxs_mem_free_buffer
287 * This routine will free iocb/data buffer space
288 * and TGTM resource.
/*
 * emlxs_mem_free_buffer
 *
 * Adapter-detach teardown; inverse of emlxs_mem_alloc_buffer().
 * Flushes every transmit channel, unmaps and returns posted SLI3
 * unsolicited buffers, releases HBQ buffers, destroys all nodes,
 * flushes the mailbox queue, frees the fc_table and bpl_table, and
 * finally destroys every memory pool in hba->memseg[].
 *
 * Always returns 0.
 */
290 extern int
291 emlxs_mem_free_buffer(emlxs_hba_t *hba)
293 emlxs_port_t *port = &PPORT;
294 emlxs_port_t *vport;
295 int32_t j;
296 MATCHMAP *mp;
297 CHANNEL *cp;
298 RING *rp;
299 MBUF_INFO *buf_info;
300 MBUF_INFO bufinfo;
301 MATCHMAP **bpl_table;
303 buf_info = &bufinfo;
305 for (j = 0; j < hba->chan_count; j++) {
306 cp = &hba->chan[j];
308 /* Flush the ring */
309 (void) emlxs_tx_channel_flush(hba, cp, 0);
312 if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK)) {
313 /* free the mapped address match area for each ring */
314 for (j = 0; j < MAX_RINGS; j++) {
315 rp = &hba->sli.sli3.ring[j];
/*
 * Walk the ring's posted-buffer list (fc_mpoff); each entry is
 * looked up by its DMA address and returned to its owning pool.
 */
317 while (rp->fc_mpoff) {
318 uint64_t addr;
320 addr = 0;
321 mp = (MATCHMAP *)(rp->fc_mpoff);
323 if ((j == hba->channel_els) ||
324 (j == hba->channel_ct) ||
325 #ifdef SFCT_SUPPORT
326 (j == hba->CHANNEL_FCT) ||
327 #endif /* SFCT_SUPPORT */
328 (j == hba->channel_ip)) {
329 addr = mp->phys;
332 if ((mp = emlxs_mem_get_vaddr(hba, rp, addr))) {
333 if (j == hba->channel_els) {
334 emlxs_mem_put(hba,
335 MEM_ELSBUF, (void *)mp);
336 } else if (j == hba->channel_ct) {
337 emlxs_mem_put(hba,
338 MEM_CTBUF, (void *)mp);
339 } else if (j == hba->channel_ip) {
340 emlxs_mem_put(hba,
341 MEM_IPBUF, (void *)mp);
343 #ifdef SFCT_SUPPORT
344 else if (j == hba->CHANNEL_FCT) {
345 emlxs_mem_put(hba,
346 MEM_FCTBUF, (void *)mp);
348 #endif /* SFCT_SUPPORT */
355 if (hba->flag & FC_HBQ_ENABLED) {
356 emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
357 emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
358 emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
360 if (port->flag & EMLXS_TGT_ENABLED) {
361 emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
365 /* Free the nodes */
366 for (j = 0; j < MAX_VPORTS; j++) {
367 vport = &VPORT(j);
368 if (vport->node_count) {
369 emlxs_node_destroy_all(vport);
373 /* Make sure the mailbox queue is empty */
374 emlxs_mb_flush(hba);
376 if (hba->fc_table) {
377 bzero(buf_info, sizeof (MBUF_INFO));
378 buf_info->size = hba->max_iotag * sizeof (emlxs_buf_t *);
379 buf_info->virt = hba->fc_table;
380 emlxs_mem_free(hba, buf_info);
381 hba->fc_table = NULL;
384 if (hba->sli.sli3.bpl_table) {
385 /* Return MEM_BPLs to their pool */
386 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
387 for (j = 0; j < hba->max_iotag; j++) {
388 mp = bpl_table[j];
/* Clear the table flag so emlxs_mem_put() accepts the object back */
389 mp->flag &= ~MAP_TABLE_ALLOCATED;
390 emlxs_mem_put(hba, MEM_BPL, (void*)mp);
393 bzero(buf_info, sizeof (MBUF_INFO));
394 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
395 buf_info->virt = hba->sli.sli3.bpl_table;
396 emlxs_mem_free(hba, buf_info);
397 hba->sli.sli3.bpl_table = NULL;
400 /* Free the memory segments */
401 for (j = 0; j < FC_MAX_SEG; j++) {
402 emlxs_mem_pool_destroy(hba, &hba->memseg[j]);
405 return (0);
407 } /* emlxs_mem_free_buffer() */
410 /* Must hold EMLXS_MEMGET_LOCK when calling */
/*
 * emlxs_mem_pool_alloc
 *
 * Grow a memory pool by up to "count" objects (clamped to fc_hi_water).
 * DMA pools (FC_MBUF_DMA) allocate a MATCHMAP descriptor plus a DMA
 * buffer per object; non-DMA pools allocate a plain virtual buffer.
 * New objects are appended to the tail of the memget freelist.
 *
 * Returns the number of objects actually added (0 if the pool is
 * uninitialized, already at its high-water mark, or count is 0).
 *
 * Caller must hold EMLXS_MEMGET_LOCK.
 */
411 static uint32_t
412 emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
414 emlxs_port_t *port = &PPORT;
415 uint8_t *bp = NULL;
416 MATCHMAP *mp = NULL;
417 MBUF_INFO *buf_info;
418 MBUF_INFO local_buf_info;
419 uint32_t i;
420 uint32_t fc_numblks;
422 if (seg->fc_memsize == 0) {
423 return (0);
426 if (seg->fc_numblks >= seg->fc_hi_water) {
427 return (0);
430 if (count == 0) {
431 return (0);
/* Never grow past the high-water mark */
434 if (count > (seg->fc_hi_water - seg->fc_numblks)) {
435 count = (seg->fc_hi_water - seg->fc_numblks);
438 buf_info = &local_buf_info;
/* Snapshot the block count so "done:" can report how many were added */
439 fc_numblks = seg->fc_numblks;
441 /* Check for initial allocation */
442 if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
443 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
444 "%s alloc:%d n=%d s=%d f=%x l=%d,%d,%d f=%d:%d",
445 seg->fc_label, count, seg->fc_numblks,
446 seg->fc_memsize, seg->fc_memflag,
447 seg->fc_lo_water, seg->fc_hi_water, seg->fc_step,
448 seg->fc_memget_cnt, seg->fc_low);
451 if (!(seg->fc_memflag & FC_MBUF_DMA)) {
452 goto vmem_pool;
455 /* dma_pool */
457 for (i = 0; i < count; i++) {
/* First allocate the MATCHMAP descriptor for this object */
458 bzero(buf_info, sizeof (MBUF_INFO));
459 buf_info->size = sizeof (MATCHMAP);
460 buf_info->align = sizeof (void *);
462 (void) emlxs_mem_alloc(hba, buf_info);
463 if (buf_info->virt == NULL) {
464 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
465 "%s: count=%d size=%d",
466 seg->fc_label, seg->fc_numblks, seg->fc_memsize);
468 goto done;
471 mp = (MATCHMAP *)buf_info->virt;
472 bzero(mp, sizeof (MATCHMAP));
/* Then the DMA buffer itself, with the pool's size/flags/alignment */
474 bzero(buf_info, sizeof (MBUF_INFO));
475 buf_info->size = seg->fc_memsize;
476 buf_info->flags = seg->fc_memflag;
477 buf_info->align = seg->fc_memalign;
479 (void) emlxs_mem_alloc(hba, buf_info);
480 if (buf_info->virt == NULL) {
481 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
482 "%s: count=%d size=%d",
483 seg->fc_label, seg->fc_numblks, seg->fc_memsize);
485 /* Free the mp object */
486 bzero(buf_info, sizeof (MBUF_INFO));
487 buf_info->size = sizeof (MATCHMAP);
488 buf_info->virt = (void *)mp;
489 emlxs_mem_free(hba, buf_info);
491 goto done;
493 bp = (uint8_t *)buf_info->virt;
494 bzero(bp, seg->fc_memsize);
496 mp->virt = buf_info->virt;
497 mp->phys = buf_info->phys;
498 mp->size = buf_info->size;
499 mp->dma_handle = buf_info->dma_handle;
500 mp->data_handle = buf_info->data_handle;
501 mp->tag = seg->fc_memtag;
502 mp->segment = seg;
503 mp->flag |= MAP_POOL_ALLOCATED;
505 #ifdef SFCT_SUPPORT
/* FCT buffers additionally need an STMF data-buffer wrapper */
506 if (mp->tag >= MEM_FCTSEG) {
507 if (emlxs_fct_stmf_alloc(hba, mp)) {
508 /* Free the DMA memory itself */
509 emlxs_mem_free(hba, buf_info);
511 /* Free the mp object */
512 bzero(buf_info, sizeof (MBUF_INFO));
513 buf_info->size = sizeof (MATCHMAP);
514 buf_info->virt = (void *)mp;
515 emlxs_mem_free(hba, buf_info);
517 goto done;
520 #endif /* SFCT_SUPPORT */
522 /* Add the buffer desc to the tail of the pool freelist */
523 if (seg->fc_memget_end == NULL) {
524 seg->fc_memget_ptr = (uint8_t *)mp;
525 seg->fc_memget_cnt = 1;
526 } else {
527 *((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)mp;
528 seg->fc_memget_cnt++;
530 seg->fc_memget_end = (uint8_t *)mp;
532 seg->fc_numblks++;
533 seg->fc_total_memsize += (seg->fc_memsize + sizeof (MATCHMAP));
536 goto done;
538 vmem_pool:
540 for (i = 0; i < count; i++) {
541 bzero(buf_info, sizeof (MBUF_INFO));
542 buf_info->size = seg->fc_memsize;
544 (void) emlxs_mem_alloc(hba, buf_info);
545 if (buf_info->virt == NULL) {
546 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
547 "%s: count=%d size=%d",
548 seg->fc_label, seg->fc_numblks, seg->fc_memsize);
550 goto done;
552 bp = (uint8_t *)buf_info->virt;
/* The first word of each free object links to the next free object */
554 /* Add the buffer to the tail of the pool freelist */
555 if (seg->fc_memget_end == NULL) {
556 seg->fc_memget_ptr = (uint8_t *)bp;
557 seg->fc_memget_cnt = 1;
558 } else {
559 *((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)bp;
560 seg->fc_memget_cnt++;
562 seg->fc_memget_end = (uint8_t *)bp;
564 seg->fc_numblks++;
565 seg->fc_total_memsize += seg->fc_memsize;
568 done:
570 return ((seg->fc_numblks - fc_numblks));
572 } /* emlxs_mem_pool_alloc() */
575 /* Must hold EMLXS_MEMGET_LOCK & EMLXS_MEMPUT_LOCK when calling */
/*
 * emlxs_mem_pool_free
 *
 * Shrink a pool by up to "count" objects.  The memput list is first
 * merged onto the memget list; objects are then unlinked from the head
 * of the memget freelist and their backing memory released (descriptor
 * + DMA buffer for DMA pools, plain virtual buffer otherwise).  Only
 * free objects can be reclaimed, so fewer than "count" may be freed.
 *
 * Caller must hold both EMLXS_MEMGET_LOCK and EMLXS_MEMPUT_LOCK.
 */
576 static void
577 emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
579 emlxs_port_t *port = &PPORT;
580 uint8_t *bp = NULL;
581 MATCHMAP *mp = NULL;
582 MBUF_INFO *buf_info;
583 MBUF_INFO local_buf_info;
585 if ((seg->fc_memsize == 0) ||
586 (seg->fc_numblks == 0) ||
587 (count == 0)) {
588 return;
591 /* Check max count */
592 if (count > seg->fc_numblks) {
593 count = seg->fc_numblks;
596 /* Move memput list to memget list */
597 if (seg->fc_memput_ptr) {
598 if (seg->fc_memget_end == NULL) {
599 seg->fc_memget_ptr = seg->fc_memput_ptr;
600 } else {
601 *((uint8_t **)(seg->fc_memget_end)) =\
602 seg->fc_memput_ptr;
604 seg->fc_memget_end = seg->fc_memput_end;
605 seg->fc_memget_cnt += seg->fc_memput_cnt;
607 seg->fc_memput_ptr = NULL;
608 seg->fc_memput_end = NULL;
609 seg->fc_memput_cnt = 0;
612 buf_info = &local_buf_info;
614 /* Check for final deallocation */
615 if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
616 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
617 "%s free:%d n=%d s=%d f=%x l=%d,%d,%d "
618 "f=%d:%d",
619 seg->fc_label, count, seg->fc_numblks,
620 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
621 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
622 seg->fc_low);
625 if (!(seg->fc_memflag & FC_MBUF_DMA)) {
626 goto vmem_pool;
629 dma_pool:
631 /* Free memory associated with all buffers on get buffer pool */
632 while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
633 /* Remove buffer from list */
634 if (seg->fc_memget_end == bp) {
635 seg->fc_memget_ptr = NULL;
636 seg->fc_memget_end = NULL;
637 seg->fc_memget_cnt = 0;
639 } else {
640 seg->fc_memget_ptr = *((uint8_t **)bp);
641 seg->fc_memget_cnt--;
643 mp = (MATCHMAP *)bp;
645 #ifdef SFCT_SUPPORT
/* Release the STMF wrapper that emlxs_mem_pool_alloc attached */
646 if (mp->tag >= MEM_FCTSEG) {
647 emlxs_fct_stmf_free(hba, mp);
649 #endif /* SFCT_SUPPORT */
651 /* Free the DMA memory itself */
652 bzero(buf_info, sizeof (MBUF_INFO));
653 buf_info->size = mp->size;
654 buf_info->virt = mp->virt;
655 buf_info->phys = mp->phys;
656 buf_info->dma_handle = mp->dma_handle;
657 buf_info->data_handle = mp->data_handle;
658 buf_info->flags = seg->fc_memflag;
659 emlxs_mem_free(hba, buf_info);
661 /* Free the handle */
662 bzero(buf_info, sizeof (MBUF_INFO));
663 buf_info->size = sizeof (MATCHMAP);
664 buf_info->virt = (void *)mp;
665 emlxs_mem_free(hba, buf_info);
667 seg->fc_numblks--;
668 seg->fc_total_memsize -= (seg->fc_memsize + sizeof (MATCHMAP));
670 count--;
673 return;
675 vmem_pool:
677 /* Free memory associated with all buffers on get buffer pool */
678 while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
679 /* Remove buffer from list */
680 if (seg->fc_memget_end == bp) {
681 seg->fc_memget_ptr = NULL;
682 seg->fc_memget_end = NULL;
683 seg->fc_memget_cnt = 0;
685 } else {
686 seg->fc_memget_ptr = *((uint8_t **)bp);
687 seg->fc_memget_cnt--;
690 /* Free the Virtual memory itself */
691 bzero(buf_info, sizeof (MBUF_INFO));
692 buf_info->size = seg->fc_memsize;
693 buf_info->virt = bp;
694 emlxs_mem_free(hba, buf_info);
696 seg->fc_numblks--;
697 seg->fc_total_memsize -= seg->fc_memsize;
699 count--;
702 return;
704 } /* emlxs_mem_pool_free() */
/*
 * emlxs_mem_pool_create
 *
 * Initialize a pool described by "seg" and pre-allocate it to its
 * low-water mark.  Normalizes the water marks (hi >= lo; lo forced to
 * hi when dynamic pools are disabled via CFG_MEM_DYNAMIC), sets
 * FC_MEMSEG_DYNAMIC and a non-zero fc_step for growable pools, and
 * finally enables GET/PUT on the segment.
 *
 * Returns the number of objects actually allocated (seg->fc_numblks),
 * or 0 if seg->fc_memsize is 0.
 */
707 extern uint32_t
708 emlxs_mem_pool_create(emlxs_hba_t *hba, MEMSEG *seg)
710 emlxs_config_t *cfg = &CFG;
712 mutex_enter(&EMLXS_MEMGET_LOCK);
713 mutex_enter(&EMLXS_MEMPUT_LOCK);
715 if (seg->fc_memsize == 0) {
716 mutex_exit(&EMLXS_MEMPUT_LOCK);
717 mutex_exit(&EMLXS_MEMGET_LOCK);
719 return (0);
722 /* Sanity check hi > lo */
723 if (seg->fc_lo_water > seg->fc_hi_water) {
724 seg->fc_hi_water = seg->fc_lo_water;
727 /* If dynamic pools are disabled, then force pool to max level */
728 if (cfg[CFG_MEM_DYNAMIC].current == 0) {
729 seg->fc_lo_water = seg->fc_hi_water;
732 /* If pool is dynamic, then fc_step must be >0 */
733 /* Otherwise, fc_step must be 0 */
734 if (seg->fc_lo_water != seg->fc_hi_water) {
735 seg->fc_memflag |= FC_MEMSEG_DYNAMIC;
737 if (seg->fc_step == 0) {
738 seg->fc_step = 1;
740 } else {
741 seg->fc_step = 0;
744 seg->fc_numblks = 0;
745 seg->fc_total_memsize = 0;
746 seg->fc_low = 0;
748 (void) emlxs_mem_pool_alloc(hba, seg, seg->fc_lo_water);
750 seg->fc_memflag |= (FC_MEMSEG_PUT_ENABLED|FC_MEMSEG_GET_ENABLED);
752 mutex_exit(&EMLXS_MEMPUT_LOCK);
753 mutex_exit(&EMLXS_MEMGET_LOCK);
755 return (seg->fc_numblks);
757 } /* emlxs_mem_pool_create() */
/*
 * emlxs_mem_pool_destroy
 *
 * Disable GETs on the pool and free every object currently on its
 * freelists.  If objects are still outstanding (fc_numblks != 0) a
 * leak is logged and the segment is left intact with PUT still enabled
 * so that late emlxs_mem_pool_put() calls can finish the teardown;
 * otherwise the segment is zeroed.
 */
760 extern void
761 emlxs_mem_pool_destroy(emlxs_hba_t *hba, MEMSEG *seg)
763 emlxs_port_t *port = &PPORT;
765 mutex_enter(&EMLXS_MEMGET_LOCK);
766 mutex_enter(&EMLXS_MEMPUT_LOCK);
768 if (seg->fc_memsize == 0) {
769 mutex_exit(&EMLXS_MEMPUT_LOCK);
770 mutex_exit(&EMLXS_MEMGET_LOCK);
771 return;
774 /* Leave FC_MEMSEG_PUT_ENABLED set for now */
775 seg->fc_memflag &= ~FC_MEMSEG_GET_ENABLED;
777 /* Try to free all objects */
778 emlxs_mem_pool_free(hba, seg, seg->fc_numblks);
780 if (seg->fc_numblks) {
781 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
782 "mem_pool_destroy: %s leak detected: "
783 "%d objects still allocated.",
784 seg->fc_label, seg->fc_numblks);
785 } else {
786 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
787 "mem_pool_destroy: %s destroyed.",
788 seg->fc_label);
790 /* Clear all */
791 bzero(seg, sizeof (MEMSEG));
794 mutex_exit(&EMLXS_MEMPUT_LOCK);
795 mutex_exit(&EMLXS_MEMGET_LOCK);
797 return;
799 } /* emlxs_mem_pool_destroy() */
/*
 * emlxs_mem_pool_clean
 *
 * Periodic trimmer for dynamic pools.  Uses fc_low — the minimum free
 * count observed since the last clean — as the estimate of excess
 * capacity, subtracts a ~3%% safety pad, clamps so the pool never drops
 * below fc_lo_water, and frees that many objects.  Also logs a detail
 * message whenever fc_numblks changed since the previous pass.
 * No-op for non-dynamic pools.
 */
802 extern void
803 emlxs_mem_pool_clean(emlxs_hba_t *hba, MEMSEG *seg)
805 emlxs_port_t *port = &PPORT;
806 uint32_t clean_count;
807 uint32_t free_count;
808 uint32_t free_pad;
810 mutex_enter(&EMLXS_MEMGET_LOCK);
811 mutex_enter(&EMLXS_MEMPUT_LOCK);
813 if (!(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
814 mutex_exit(&EMLXS_MEMPUT_LOCK);
815 mutex_exit(&EMLXS_MEMGET_LOCK);
816 return;
819 if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
820 goto done;
823 #ifdef EMLXS_POOL_DEBUG
824 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
825 "%s clean: n=%d s=%d f=%x l=%d,%d,%d "
826 "f=%d:%d",
827 seg->fc_label, seg->fc_numblks,
828 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
829 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
830 seg->fc_low);
831 #endif /* EMLXS_POOL_DEBUG */
833 /* Calculatge current free count */
834 free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);
836 /* Reset fc_low value to current free count */
837 clean_count = seg->fc_low;
838 seg->fc_low = free_count;
840 /* Return if pool is already at lo water mark */
841 if (seg->fc_numblks <= seg->fc_lo_water) {
842 goto done;
845 /* Return if there is nothing to clean */
846 if ((free_count == 0) ||
847 (clean_count <= 1)) {
848 goto done;
851 /* Calculate a 3 percent free pad count (1 being minimum) */
852 if (seg->fc_numblks > 66) {
853 free_pad = ((seg->fc_numblks * 3)/100);
854 } else {
855 free_pad = 1;
858 /* Return if fc_low is below pool free pad */
859 if (clean_count <= free_pad) {
860 goto done;
863 clean_count -= free_pad;
865 /* clean_count can't exceed minimum pool levels */
866 if (clean_count > (seg->fc_numblks - seg->fc_lo_water)) {
867 clean_count = (seg->fc_numblks - seg->fc_lo_water);
870 emlxs_mem_pool_free(hba, seg, clean_count);
872 done:
/* Log pool-size changes since the previous clean pass */
873 if (seg->fc_last != seg->fc_numblks) {
874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
875 "%s update: n=%d->%d s=%d f=%x l=%d,%d,%d "
876 "f=%d:%d",
877 seg->fc_label, seg->fc_last, seg->fc_numblks,
878 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
879 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
880 seg->fc_low);
882 seg->fc_last = seg->fc_numblks;
885 mutex_exit(&EMLXS_MEMPUT_LOCK);
886 mutex_exit(&EMLXS_MEMGET_LOCK);
887 return;
889 } /* emlxs_mem_pool_clean() */
/*
 * emlxs_mem_pool_get
 *
 * Pop one object off the pool's memget freelist.  If the memget list
 * is empty the memput list is migrated over; if still empty and the
 * pool is dynamic, the pool is grown by fc_step.  Non-DMA objects are
 * zeroed before return; DMA objects (MATCHMAP) get fc_mptr cleared and
 * MAP_POOL_ALLOCATED set.  Tracks the low-free watermark (fc_low) used
 * by emlxs_mem_pool_clean().
 *
 * Returns the object, or NULL if the pool is GET-disabled or empty.
 */
892 extern void *
893 emlxs_mem_pool_get(emlxs_hba_t *hba, MEMSEG *seg)
895 emlxs_port_t *port = &PPORT;
896 void *bp = NULL;
897 MATCHMAP *mp;
898 uint32_t free_count;
900 mutex_enter(&EMLXS_MEMGET_LOCK);
902 /* Check if memory pool is GET enabled */
903 if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
904 mutex_exit(&EMLXS_MEMGET_LOCK);
905 return (NULL);
908 /* If no entries on memget list, then check memput list */
909 if (!seg->fc_memget_ptr) {
910 mutex_enter(&EMLXS_MEMPUT_LOCK);
911 if (seg->fc_memput_ptr) {
913 * Move list from memput to memget
915 seg->fc_memget_ptr = seg->fc_memput_ptr;
916 seg->fc_memget_end = seg->fc_memput_end;
917 seg->fc_memget_cnt = seg->fc_memput_cnt;
918 seg->fc_memput_ptr = NULL;
919 seg->fc_memput_end = NULL;
920 seg->fc_memput_cnt = 0;
922 mutex_exit(&EMLXS_MEMPUT_LOCK);
925 /* If no entries on memget list, then pool is empty */
926 /* Try to allocate more if pool is dynamic */
927 if (!seg->fc_memget_ptr &&
928 (seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
929 (void) emlxs_mem_pool_alloc(hba, seg, seg->fc_step);
930 seg->fc_low = 0;
933 /* If no entries on memget list, then pool is empty */
934 if (!seg->fc_memget_ptr) {
935 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
936 "%s empty.", seg->fc_label);
938 mutex_exit(&EMLXS_MEMGET_LOCK);
939 return (NULL);
942 /* Remove an entry from the get list */
943 bp = seg->fc_memget_ptr;
945 if (seg->fc_memget_end == bp) {
946 seg->fc_memget_ptr = NULL;
947 seg->fc_memget_end = NULL;
948 seg->fc_memget_cnt = 0;
950 } else {
/* First word of a free object links to the next free object */
951 seg->fc_memget_ptr = *((uint8_t **)bp);
952 seg->fc_memget_cnt--;
955 /* Initialize buffer */
956 if (!(seg->fc_memflag & FC_MBUF_DMA)) {
957 bzero(bp, seg->fc_memsize);
958 } else {
959 mp = (MATCHMAP *)bp;
960 mp->fc_mptr = NULL;
961 mp->flag |= MAP_POOL_ALLOCATED;
964 /* Set fc_low if pool is dynamic */
965 if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
966 free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);
967 if (free_count < seg->fc_low) {
968 seg->fc_low = free_count;
972 mutex_exit(&EMLXS_MEMGET_LOCK);
974 return (bp);
976 } /* emlxs_mem_pool_get() */
/*
 * emlxs_mem_pool_put
 *
 * Return an object to the tail of the pool's memput freelist.
 * Rejects double-frees (object already at a list tail) and, for DMA
 * pools, validates that the MATCHMAP carries MAP_POOL_ALLOCATED and
 * belongs to this segment — a mismatch is treated as fatal corruption
 * (adapter is shut down).  If the pool is mid-destroy (PUT enabled but
 * GET disabled), re-runs emlxs_mem_pool_destroy() so the last
 * straggling PUT completes the teardown.
 */
979 extern void
980 emlxs_mem_pool_put(emlxs_hba_t *hba, MEMSEG *seg, void *bp)
982 emlxs_port_t *port = &PPORT;
983 MATCHMAP *mp;
985 /* Free the pool object */
986 mutex_enter(&EMLXS_MEMPUT_LOCK);
988 /* Check if memory pool is PUT enabled */
989 if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
990 mutex_exit(&EMLXS_MEMPUT_LOCK);
991 return;
994 /* Check if buffer was just freed */
995 if ((seg->fc_memput_end == bp) || (seg->fc_memget_end == bp)) {
996 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
997 "%s: Freeing free object: bp=%p", seg->fc_label, bp);
999 mutex_exit(&EMLXS_MEMPUT_LOCK);
1000 return;
1003 /* Validate DMA buffer */
1004 if (seg->fc_memflag & FC_MBUF_DMA) {
1005 mp = (MATCHMAP *)bp;
1007 if (!(mp->flag & MAP_POOL_ALLOCATED) ||
1008 (mp->segment != seg)) {
1009 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1010 "mem_pool_put: %s invalid: mp=%p " \
1011 "tag=0x%x flag=%x", seg->fc_label,
1012 mp, mp->tag, mp->flag);
/* Pool corruption: take the adapter down rather than continue */
1014 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1016 mutex_exit(&EMLXS_MEMPUT_LOCK);
1018 emlxs_thread_spawn(hba, emlxs_shutdown_thread,
1019 NULL, NULL);
1021 return;
1025 /* Release buffer to the end of the memput list */
1026 if (seg->fc_memput_end == NULL) {
1027 seg->fc_memput_ptr = bp;
1028 seg->fc_memput_cnt = 1;
1029 } else {
1030 *((void **)(seg->fc_memput_end)) = bp;
1031 seg->fc_memput_cnt++;
1033 seg->fc_memput_end = bp;
1034 *((void **)(bp)) = NULL;
1036 mutex_exit(&EMLXS_MEMPUT_LOCK);
1038 /* This is for late PUT's after an initial */
1039 /* emlxs_mem_pool_destroy call */
1040 if ((seg->fc_memflag & FC_MEMSEG_PUT_ENABLED) &&
1041 !(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
1042 emlxs_mem_pool_destroy(hba, seg);
1045 return;
1047 } /* emlxs_mem_pool_put() */
/*
 * emlxs_mem_buf_alloc
 *
 * Allocate a standalone (non-pooled) DMA buffer of "size" bytes plus
 * its MATCHMAP descriptor.  The result is tagged MEM_BUF and flagged
 * MAP_BUF_ALLOCATED so emlxs_mem_put()/emlxs_mem_buf_free() route it
 * back here rather than to a pool.
 *
 * Returns the MATCHMAP, or NULL on allocation failure (the descriptor
 * is released if the DMA allocation fails).
 */
1050 extern MATCHMAP *
1051 emlxs_mem_buf_alloc(emlxs_hba_t *hba, uint32_t size)
1053 emlxs_port_t *port = &PPORT;
1054 uint8_t *bp = NULL;
1055 MATCHMAP *mp = NULL;
1056 MBUF_INFO *buf_info;
1057 MBUF_INFO bufinfo;
1059 buf_info = &bufinfo;
1061 bzero(buf_info, sizeof (MBUF_INFO));
1062 buf_info->size = sizeof (MATCHMAP);
1063 buf_info->align = sizeof (void *);
1065 (void) emlxs_mem_alloc(hba, buf_info);
1066 if (buf_info->virt == NULL) {
1067 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1068 "MEM_BUF_ALLOC buffer.");
1070 return (NULL);
1073 mp = (MATCHMAP *)buf_info->virt;
1074 bzero(mp, sizeof (MATCHMAP));
1076 bzero(buf_info, sizeof (MBUF_INFO));
1077 buf_info->size = size;
1078 buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
1079 buf_info->align = 32;
1081 (void) emlxs_mem_alloc(hba, buf_info);
1082 if (buf_info->virt == NULL) {
1084 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1085 "MEM_BUF_ALLOC DMA buffer.");
1087 /* Free the mp object */
1088 bzero(buf_info, sizeof (MBUF_INFO));
1089 buf_info->size = sizeof (MATCHMAP);
1090 buf_info->virt = (void *)mp;
1091 emlxs_mem_free(hba, buf_info);
1093 return (NULL);
1095 bp = (uint8_t *)buf_info->virt;
1096 bzero(bp, buf_info->size);
1098 mp->virt = buf_info->virt;
1099 mp->phys = buf_info->phys;
1100 mp->size = buf_info->size;
1101 mp->dma_handle = buf_info->dma_handle;
1102 mp->data_handle = buf_info->data_handle;
1103 mp->tag = MEM_BUF;
1104 mp->flag |= MAP_BUF_ALLOCATED;
1106 return (mp);
1108 } /* emlxs_mem_buf_alloc() */
/*
 * emlxs_mem_buf_free
 *
 * Release a buffer created by emlxs_mem_buf_alloc(): first the DMA
 * memory, then the MATCHMAP descriptor.  Silently ignores objects that
 * do not carry MAP_BUF_ALLOCATED (i.e. pool-owned buffers).
 */
1111 extern void
1112 emlxs_mem_buf_free(emlxs_hba_t *hba, MATCHMAP *mp)
1114 MBUF_INFO bufinfo;
1115 MBUF_INFO *buf_info;
1117 buf_info = &bufinfo;
1119 if (!(mp->flag & MAP_BUF_ALLOCATED)) {
1120 return;
1123 bzero(buf_info, sizeof (MBUF_INFO));
1124 buf_info->size = mp->size;
1125 buf_info->virt = mp->virt;
1126 buf_info->phys = mp->phys;
1127 buf_info->dma_handle = mp->dma_handle;
1128 buf_info->data_handle = mp->data_handle;
1129 buf_info->flags = FC_MBUF_DMA;
1130 emlxs_mem_free(hba, buf_info);
1132 bzero(buf_info, sizeof (MBUF_INFO));
1133 buf_info->size = sizeof (MATCHMAP);
1134 buf_info->virt = (void *)mp;
1135 emlxs_mem_free(hba, buf_info);
1137 return;
1139 } /* emlxs_mem_buf_free() */
/*
 * emlxs_mem_get
 *
 * Public pool-get wrapper: validate seg_id, fetch an object from
 * hba->memseg[seg_id] via emlxs_mem_pool_get(), and stamp the
 * object-type-specific POOL_ALLOCATED flag (MBOX/IOCB/NLP) that
 * emlxs_mem_put() later checks.
 *
 * Returns the object, or NULL for an invalid seg_id or an empty pool.
 */
1142 extern void *
1143 emlxs_mem_get(emlxs_hba_t *hba, uint32_t seg_id)
1145 emlxs_port_t *port = &PPORT;
1146 void *bp;
1147 MAILBOXQ *mbq;
1148 IOCBQ *iocbq;
1149 NODELIST *node;
1150 MEMSEG *seg;
1152 if (seg_id >= FC_MAX_SEG) {
1154 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1155 "mem_get: Invalid segment id = %d",
1156 seg_id);
1158 return (NULL);
1160 seg = &hba->memseg[seg_id];
1162 /* Alloc a buffer from the pool */
1163 bp = emlxs_mem_pool_get(hba, seg);
1165 if (bp) {
1166 switch (seg_id) {
1167 case MEM_MBOX:
1168 mbq = (MAILBOXQ *)bp;
1169 mbq->flag |= MBQ_POOL_ALLOCATED;
1170 break;
1172 case MEM_IOCB:
1173 iocbq = (IOCBQ *)bp;
1174 iocbq->flag |= IOCB_POOL_ALLOCATED;
1175 break;
1177 case MEM_NLP:
1178 node = (NODELIST *)bp;
1179 node->flag |= NODE_POOL_ALLOCATED;
1180 break;
1184 return (bp);
1186 } /* emlxs_mem_get() */
/*
 * emlxs_mem_put
 *
 * Public pool-put wrapper: validate seg_id and that the object really
 * came from the matching pool (POOL_ALLOCATED flag per object type)
 * before handing it to emlxs_mem_pool_put().  Special cases:
 * IOCBQs with an attached packet (iocbq->sbp) are not pool objects;
 * MAP_BUF_ALLOCATED buffers go back through emlxs_mem_buf_free();
 * MAP_TABLE_ALLOCATED BPLs are owned by bpl_table and never returned.
 */
1189 extern void
1190 emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg_id, void *bp)
1192 emlxs_port_t *port = &PPORT;
1193 MAILBOXQ *mbq;
1194 IOCBQ *iocbq;
1195 NODELIST *node;
1196 MEMSEG *seg;
1197 MATCHMAP *mp;
1199 if (seg_id >= FC_MAX_SEG) {
1201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1202 "mem_put: Invalid segment id = %d: bp=%p",
1203 seg_id, bp);
1205 return;
1207 seg = &hba->memseg[seg_id];
1209 /* Verify buffer */
1210 switch (seg_id) {
1211 case MEM_MBOX:
1212 mbq = (MAILBOXQ *)bp;
1214 if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
1215 return;
1217 break;
1219 case MEM_IOCB:
1220 iocbq = (IOCBQ *)bp;
1222 if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
1223 return;
1226 /* Any IOCBQ with a packet attached did not come */
1227 /* from our pool */
1228 if (iocbq->sbp) {
1229 return;
1231 break;
1233 case MEM_NLP:
1234 node = (NODELIST *)bp;
1236 if (!(node->flag & NODE_POOL_ALLOCATED)) {
1237 return;
1239 break;
1241 default:
/* All remaining segments hold MATCHMAP-described DMA buffers */
1242 mp = (MATCHMAP *)bp;
1244 if (mp->flag & MAP_BUF_ALLOCATED) {
1245 emlxs_mem_buf_free(hba, mp);
1246 return;
1249 if (mp->flag & MAP_TABLE_ALLOCATED) {
1250 return;
1253 if (!(mp->flag & MAP_POOL_ALLOCATED)) {
1254 return;
1256 break;
1259 /* Free a buffer to the pool */
1260 emlxs_mem_pool_put(hba, seg, bp);
1262 return;
1264 } /* emlxs_mem_put() */
1268 * Look up the virtual address given a mapped address
1270 /* SLI3 */
/*
 * emlxs_mem_get_vaddr (SLI3 only)
 *
 * Look up, unlink and return the MATCHMAP whose DMA address equals
 * "mapbp" on the given ring's posted-buffer list (fc_mpoff/fc_mpon,
 * singly linked through fc_mptr).  The same walk is repeated per ring
 * type (ELS/CT/IP/FCT) so the matching HBASTATS counter can be
 * decremented.  The buffer's DMA memory is synced FORKERNEL before
 * return so the CPU sees data the hardware wrote.
 *
 * Returns the unlinked MATCHMAP, or 0 if the address is not on the
 * list (an error message is logged).
 */
1271 extern MATCHMAP *
1272 emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
1274 emlxs_port_t *port = &PPORT;
1275 MATCHMAP *prev;
1276 MATCHMAP *mp;
1278 if (rp->ringno == hba->channel_els) {
1279 mp = (MATCHMAP *)rp->fc_mpoff;
1280 prev = 0;
1282 while (mp) {
1283 if (mp->phys == mapbp) {
/* Unlink: fix the head (or predecessor) and the tail pointer */
1284 if (prev == 0) {
1285 rp->fc_mpoff = mp->fc_mptr;
1286 } else {
1287 prev->fc_mptr = mp->fc_mptr;
1290 if (rp->fc_mpon == mp) {
1291 rp->fc_mpon = (void *)prev;
1294 mp->fc_mptr = NULL;
1296 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1297 DDI_DMA_SYNC_FORKERNEL);
1299 HBASTATS.ElsUbPosted--;
1301 return (mp);
1304 prev = mp;
1305 mp = (MATCHMAP *)mp->fc_mptr;
1308 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1309 "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1310 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1312 } else if (rp->ringno == hba->channel_ct) {
1314 mp = (MATCHMAP *)rp->fc_mpoff;
1315 prev = 0;
1317 while (mp) {
1318 if (mp->phys == mapbp) {
1319 if (prev == 0) {
1320 rp->fc_mpoff = mp->fc_mptr;
1321 } else {
1322 prev->fc_mptr = mp->fc_mptr;
1325 if (rp->fc_mpon == mp) {
1326 rp->fc_mpon = (void *)prev;
1329 mp->fc_mptr = NULL;
1331 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1332 DDI_DMA_SYNC_FORKERNEL);
1334 HBASTATS.CtUbPosted--;
1336 return (mp);
1339 prev = mp;
1340 mp = (MATCHMAP *)mp->fc_mptr;
1343 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1344 "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1345 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1347 } else if (rp->ringno == hba->channel_ip) {
1349 mp = (MATCHMAP *)rp->fc_mpoff;
1350 prev = 0;
1352 while (mp) {
1353 if (mp->phys == mapbp) {
1354 if (prev == 0) {
1355 rp->fc_mpoff = mp->fc_mptr;
1356 } else {
1357 prev->fc_mptr = mp->fc_mptr;
1360 if (rp->fc_mpon == mp) {
1361 rp->fc_mpon = (void *)prev;
1364 mp->fc_mptr = NULL;
1366 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1367 DDI_DMA_SYNC_FORKERNEL);
1369 HBASTATS.IpUbPosted--;
1371 return (mp);
1374 prev = mp;
1375 mp = (MATCHMAP *)mp->fc_mptr;
1378 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1379 "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1380 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1382 #ifdef SFCT_SUPPORT
1383 } else if (rp->ringno == hba->CHANNEL_FCT) {
1384 mp = (MATCHMAP *)rp->fc_mpoff;
1385 prev = 0;
1387 while (mp) {
1388 if (mp->phys == mapbp) {
1389 if (prev == 0) {
1390 rp->fc_mpoff = mp->fc_mptr;
1391 } else {
1392 prev->fc_mptr = mp->fc_mptr;
1395 if (rp->fc_mpon == mp) {
1396 rp->fc_mpon = (void *)prev;
1399 mp->fc_mptr = NULL;
1401 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1402 DDI_DMA_SYNC_FORKERNEL);
1404 HBASTATS.FctUbPosted--;
1406 return (mp);
1409 prev = mp;
1410 mp = (MATCHMAP *)mp->fc_mptr;
1413 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1414 "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1415 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1417 #endif /* SFCT_SUPPORT */
1420 return (0);
1422 } /* emlxs_mem_get_vaddr() */
1426 * Given a virtual address bp, generate the physical mapped address and
1427 * place it where addr points to. Save the address pair for lookup later.
1429 /* SLI3 */
1430 extern void
1431 emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp,
1432 uint32_t *haddr, uint32_t *laddr)
1434 if (rp->ringno == hba->channel_els) {
1436 * Update slot fc_mpon points to then bump it
1437 * fc_mpoff is pointer head of the list.
1438 * fc_mpon is pointer tail of the list.
1440 mp->fc_mptr = NULL;
1441 if (rp->fc_mpoff == 0) {
1442 rp->fc_mpoff = (void *)mp;
1443 rp->fc_mpon = (void *)mp;
1444 } else {
1445 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1446 (void *)mp;
1447 rp->fc_mpon = (void *)mp;
1450 if (hba->flag & FC_SLIM2_MODE) {
1452 /* return mapped address */
1453 *haddr = PADDR_HI(mp->phys);
1454 /* return mapped address */
1455 *laddr = PADDR_LO(mp->phys);
1456 } else {
1457 /* return mapped address */
1458 *laddr = PADDR_LO(mp->phys);
1461 HBASTATS.ElsUbPosted++;
1463 } else if (rp->ringno == hba->channel_ct) {
1465 * Update slot fc_mpon points to then bump it
1466 * fc_mpoff is pointer head of the list.
1467 * fc_mpon is pointer tail of the list.
1469 mp->fc_mptr = NULL;
1470 if (rp->fc_mpoff == 0) {
1471 rp->fc_mpoff = (void *)mp;
1472 rp->fc_mpon = (void *)mp;
1473 } else {
1474 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1475 (void *)mp;
1476 rp->fc_mpon = (void *)mp;
1479 if (hba->flag & FC_SLIM2_MODE) {
1480 /* return mapped address */
1481 *haddr = PADDR_HI(mp->phys);
1482 /* return mapped address */
1483 *laddr = PADDR_LO(mp->phys);
1484 } else {
1485 /* return mapped address */
1486 *laddr = PADDR_LO(mp->phys);
1489 HBASTATS.CtUbPosted++;
1492 } else if (rp->ringno == hba->channel_ip) {
1494 * Update slot fc_mpon points to then bump it
1495 * fc_mpoff is pointer head of the list.
1496 * fc_mpon is pointer tail of the list.
1498 mp->fc_mptr = NULL;
1499 if (rp->fc_mpoff == 0) {
1500 rp->fc_mpoff = (void *)mp;
1501 rp->fc_mpon = (void *)mp;
1502 } else {
1503 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1504 (void *)mp;
1505 rp->fc_mpon = (void *)mp;
1508 if (hba->flag & FC_SLIM2_MODE) {
1509 /* return mapped address */
1510 *haddr = PADDR_HI(mp->phys);
1511 *laddr = PADDR_LO(mp->phys);
1512 } else {
1513 *laddr = PADDR_LO(mp->phys);
1516 HBASTATS.IpUbPosted++;
1519 #ifdef SFCT_SUPPORT
1520 } else if (rp->ringno == hba->CHANNEL_FCT) {
1522 * Update slot fc_mpon points to then bump it
1523 * fc_mpoff is pointer head of the list.
1524 * fc_mpon is pointer tail of the list.
1526 mp->fc_mptr = NULL;
1527 if (rp->fc_mpoff == 0) {
1528 rp->fc_mpoff = (void *)mp;
1529 rp->fc_mpon = (void *)mp;
1530 } else {
1531 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1532 (void *)mp;
1533 rp->fc_mpon = (void *)mp;
1536 if (hba->flag & FC_SLIM2_MODE) {
1537 /* return mapped address */
1538 *haddr = PADDR_HI(mp->phys);
1539 /* return mapped address */
1540 *laddr = PADDR_LO(mp->phys);
1541 } else {
1542 /* return mapped address */
1543 *laddr = PADDR_LO(mp->phys);
1546 HBASTATS.FctUbPosted++;
1548 #endif /* SFCT_SUPPORT */
1550 } /* emlxs_mem_map_vaddr() */
1553 /* SLI3 */
1554 uint32_t
1555 emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
1557 emlxs_port_t *port = &PPORT;
1558 HBQ_INIT_t *hbq;
1559 MBUF_INFO *buf_info;
1560 MBUF_INFO bufinfo;
1562 hbq = &hba->sli.sli3.hbq_table[hbq_id];
1564 if (hbq->HBQ_host_buf.virt == 0) {
1565 buf_info = &bufinfo;
1567 /* Get the system's page size in a DDI-compliant way. */
1568 bzero(buf_info, sizeof (MBUF_INFO));
1569 buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
1570 buf_info->flags = FC_MBUF_DMA;
1571 buf_info->align = 4096;
1573 (void) emlxs_mem_alloc(hba, buf_info);
1575 if (buf_info->virt == NULL) {
1576 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
1577 "Unable to alloc HBQ.");
1578 return (ENOMEM);
1581 hbq->HBQ_host_buf.virt = buf_info->virt;
1582 hbq->HBQ_host_buf.phys = buf_info->phys;
1583 hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
1584 hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
1585 hbq->HBQ_host_buf.size = buf_info->size;
1586 hbq->HBQ_host_buf.tag = hbq_id;
1588 bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
1591 return (0);
1593 } /* emlxs_hbq_alloc() */