/* usr/src/uts/common/io/bnxe/bnxe_mm.c */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 */
#include "bnxe.h"

#define BNXE_DEF_TX_BD_PAGE_CNT  12
#define BNXE_DEF_TX_COAL_BUF_CNT 10

typedef struct
{
    int bufCnt;
    int txBdPageCnt;
    int txCoalBufCnt;
} BnxeHwPageConfig;

static BnxeHwPageConfig bnxeHwPageConfigs[] =
{
    /* Buffers   TX BD Pages   TX Coalesce Bufs */
    {  1000,     4,            10 },
    {  1500,     6,            10 },
    {  3000,     12,           10 },
    {  0,        0,            0  }
};

#if 0
#define MEM_LOG BnxeLogInfo
#else
#define MEM_LOG
#endif
ddi_device_acc_attr_t bnxeAccessAttribBAR =
{
    DDI_DEVICE_ATTR_V0,   /* devacc_attr_version */
    DDI_STRUCTURE_LE_ACC, /* devacc_attr_endian_flags */
    DDI_STRICTORDER_ACC,  /* devacc_attr_dataorder */
    DDI_DEFAULT_ACC       /* devacc_attr_access */
};

ddi_device_acc_attr_t bnxeAccessAttribBUF =
{
    DDI_DEVICE_ATTR_V0,   /* devacc_attr_version */
    DDI_NEVERSWAP_ACC,    /* devacc_attr_endian_flags */
    DDI_STRICTORDER_ACC,  /* devacc_attr_dataorder */
    DDI_DEFAULT_ACC       /* devacc_attr_access */
};

ddi_dma_attr_t bnxeDmaPageAttrib =
{
    DMA_ATTR_V0,        /* dma_attr_version */
    0,                  /* dma_attr_addr_lo */
    0xffffffffffffffff, /* dma_attr_addr_hi */
    0xffffffffffffffff, /* dma_attr_count_max */
    0,                  /* dma_attr_align */
    0xffffffff,         /* dma_attr_burstsizes */
    1,                  /* dma_attr_minxfer */
    0xffffffffffffffff, /* dma_attr_maxxfer */
    0xffffffffffffffff, /* dma_attr_seg */
    1,                  /* dma_attr_sgllen */
    1,                  /* dma_attr_granular */
    0,                  /* dma_attr_flags */
};
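
/*
 * Note: dma_attr_sgllen is 1, so every binding against this attribute must
 * resolve to a single DMA cookie (i.e. a physically contiguous buffer).
 * dma_attr_align is 0 here and is patched per-allocation in
 * mm_alloc_phys_mem_align_imp() below.
 */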
void mm_wait(lm_device_t * pDev,
             u32_t         delayUs)
{
    (void)pDev;
    drv_usecwait(delayUs);
}


lm_status_t mm_read_pci(lm_device_t * pDev,
                        u32_t         pciReg,
                        u32_t *       pRegValue)
{
    um_device_t * pUM = (um_device_t *)pDev;

    *pRegValue = pci_config_get32(pUM->pPciCfg, (off_t)pciReg);

    return LM_STATUS_SUCCESS;
}


lm_status_t mm_write_pci(lm_device_t * pDev,
                         u32_t         pciReg,
                         u32_t         regValue)
{
    um_device_t * pUM = (um_device_t *)pDev;

    pci_config_put32(pUM->pPciCfg, (off_t)pciReg, regValue);

    return LM_STATUS_SUCCESS;
}
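
/*
 * Example (hypothetical caller): pciReg is a byte offset into config space
 * and accesses are 32 bits wide, so reading offset 0 returns the device ID
 * in the upper 16 bits and the vendor ID in the lower 16 bits:
 *
 *     u32_t ids;
 *     (void)mm_read_pci(pDev, 0, &ids);  // ids = (devid << 16) | vendid
 */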
void BnxeInitBdCnts(um_device_t * pUM,
                    int           cli_idx)
{
    lm_device_t *      pLM = (lm_device_t *)pUM;
    BnxeHwPageConfig * pPageCfg;

    pLM->params.l2_tx_bd_page_cnt[cli_idx]  = BNXE_DEF_TX_BD_PAGE_CNT;
    pLM->params.l2_tx_coal_buf_cnt[cli_idx] = BNXE_DEF_TX_COAL_BUF_CNT;

    pPageCfg = &bnxeHwPageConfigs[0];

    while (pPageCfg->bufCnt)
    {
        if (pLM->params.l2_rx_desc_cnt[cli_idx] <= pPageCfg->bufCnt)
        {
            pLM->params.l2_tx_bd_page_cnt[cli_idx]  = pPageCfg->txBdPageCnt;
            pLM->params.l2_tx_coal_buf_cnt[cli_idx] = pPageCfg->txCoalBufCnt;
            break;
        }

        pPageCfg++;
    }
}
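
/*
 * Worked example: with l2_rx_desc_cnt == 1500 the first table row that
 * satisfies (1500 <= bufCnt) is { 1500, 6, 10 }, so 6 TX BD pages and 10
 * coalesce buffers are used.  Any descriptor count above 3000 falls off the
 * end of the table and keeps the defaults (12 pages / 10 buffers).
 */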
extern u32_t LOG2(u32_t v);
unsigned long log2_align(unsigned long n);

lm_status_t mm_get_user_config(lm_device_t * pLM)
{
    um_device_t * pUM = (um_device_t *)pLM;
    u32_t total_size;
    u32_t required_page_size;

    BnxeCfgInit(pUM);

    pLM->params.sw_config = LM_SWCFG_10G;

    pLM->params.ofld_cap = (LM_OFFLOAD_TX_IP_CKSUM   |
                            LM_OFFLOAD_RX_IP_CKSUM   |
                            LM_OFFLOAD_TX_TCP_CKSUM  |
                            LM_OFFLOAD_RX_TCP_CKSUM  |
                            LM_OFFLOAD_TX_TCP6_CKSUM |
                            LM_OFFLOAD_RX_TCP6_CKSUM |
                            LM_OFFLOAD_TX_UDP_CKSUM  |
                            LM_OFFLOAD_RX_UDP_CKSUM  |
                            LM_OFFLOAD_TX_UDP6_CKSUM |
                            LM_OFFLOAD_RX_UDP6_CKSUM);

    /* XXX Wake on LAN? */
    //pLM->params.wol_cap = (LM_WAKE_UP_MODE_MAGIC_PACKET | LM_WAKE_UP_MODE_NWUF);

    /* keep the VLAN tag in the mac header when receiving */
    pLM->params.keep_vlan_tag = 1;

    /* set in BnxeIntrInit based on the allocated number of MSIX interrupts */
    //pLM->params.rss_chain_cnt = pUM->devParams.numRings;
    //pLM->params.tss_chain_cnt = pUM->devParams.numRings;

    pLM->params.l2_rx_desc_cnt[LM_CLI_IDX_NDIS] = pUM->devParams.numRxDesc[LM_CLI_IDX_NDIS];
    pLM->params.l2_tx_bd_page_cnt[LM_CLI_IDX_NDIS]  = 0;
    pLM->params.l2_tx_coal_buf_cnt[LM_CLI_IDX_NDIS] = 0;

    BnxeInitBdCnts(pUM, LM_CLI_IDX_NDIS);

    pLM->params.l2_rx_desc_cnt[LM_CLI_IDX_FWD]      = 0;
    pLM->params.l2_tx_bd_page_cnt[LM_CLI_IDX_FWD]   = 0;
    pLM->params.l2_tx_coal_buf_cnt[LM_CLI_IDX_FWD]  = 0;

    pLM->params.l2_rx_desc_cnt[LM_CLI_IDX_ISCSI]     = 0;
    pLM->params.l2_tx_bd_page_cnt[LM_CLI_IDX_ISCSI]  = 0;
    pLM->params.l2_tx_coal_buf_cnt[LM_CLI_IDX_ISCSI] = 0;

    pLM->params.l2_rx_desc_cnt[LM_CLI_IDX_FCOE]     = 0;
    pLM->params.l2_tx_bd_page_cnt[LM_CLI_IDX_FCOE]  = 0;
    pLM->params.l2_tx_coal_buf_cnt[LM_CLI_IDX_FCOE] = 0;

    pLM->params.max_func_toe_cons   = 0;
    pLM->params.max_func_iscsi_cons = 0;
    pLM->params.max_func_rdma_cons  = 0;
    pLM->params.max_func_fcoe_cons  = pUM->lm_dev.hw_info.max_port_fcoe_conn;
    pLM->params.max_func_connections =
        log2_align(pLM->params.max_func_toe_cons +
                   pLM->params.max_func_rdma_cons +
                   pLM->params.max_func_iscsi_cons +
                   pLM->params.max_func_fcoe_cons +
                   MAX_ETH_CONS);

    /* determine: 1. ilt_client_page_size, #contexts in page */

    /* based on PCIe block INIT document */

    /* We now need to calculate the page size based on the maximum number of
     * connections supported. Since this property is identical for all ports,
     * and is configured in COMMON registers, we need to use the maximum
     * number of connections across all ports. */

    /* The L2P table is used to map logical addresses to physical ones. There
     * are four clients that use this table. Since we want to use only the
     * ILT (Internal), we need to calculate the total size required for all
     * clients, divide it by the number of entries in the ILT table, and that
     * gives us the page size we want. The following table describes the
     * needs of each of these clients:
     *
     *   HW block (L2P client)   Area name      Size [B]
     *   Searcher                T1             ROUNDUP(LOG2(N)) * 64
     *   Timers                  Linear Array   N * 8
     *   QM                      Queues         N * 32 * 4
     *   CDU                     Context        N * S + W * ROUNDUP(N/m)  (W=0)
     *
     *   N: Number of connections
     *   S: Context size
     *   W: Block waste (not really interesting; we configure the context
     *      size to be a power of 2)
     *   m: Number of cids in a block (not really interesting, since W will
     *      always be 0)
     */
    total_size = (pLM->hw_info.max_common_conns *
                  (SEARCHER_TOTAL_MEM_REQUIRED_PER_CON +
                   TIMERS_TOTAL_MEM_REQUIRED_PER_CON +
                   QM_TOTAL_MEM_REQUIRED_PER_CON +
                   pLM->params.context_line_size));

    required_page_size = (total_size / ILT_NUM_PAGE_ENTRIES_PER_FUNC);
    required_page_size = (2 << LOG2(required_page_size));

    if (required_page_size < LM_PAGE_SIZE)
    {
        required_page_size = LM_PAGE_SIZE;
    }

    pLM->params.ilt_client_page_size = required_page_size;
    pLM->params.num_context_in_page  = (pLM->params.ilt_client_page_size /
                                        pLM->params.context_line_size);
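
    /*
     * Worked example (illustrative numbers only, not taken from real
     * hardware): with max_common_conns == 16384 and per-connection costs of
     * 64 + 8 + 128 bytes for Searcher/Timers/QM plus a 1024-byte context
     * line, total_size = 16384 * (64 + 8 + 128 + 1024) = ~19.1MB.  Dividing
     * by the ILT entry count and rounding up to the next power of two via
     * (2 << LOG2(x)) yields the common page size, floored at LM_PAGE_SIZE.
     */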
    if (pUM->devParams.intrCoalesce)
    {
        pLM->params.int_coalesing_mode      = LM_INT_COAL_PERIODIC_SYNC;
        pLM->params.int_per_sec_rx_override = pUM->devParams.intrRxPerSec;
        pLM->params.int_per_sec_tx_override = pUM->devParams.intrTxPerSec;
    }
    else
    {
        pLM->params.int_coalesing_mode = LM_INT_COAL_NONE;
    }

    pLM->params.enable_dynamic_hc[0] = 0;
    pLM->params.enable_dynamic_hc[1] = 0;
    pLM->params.enable_dynamic_hc[2] = 0;
    pLM->params.enable_dynamic_hc[3] = 0;

    /*
     * l2_fw_flow_ctrl is read from the shmem in MF mode in E2 and above. In
     * all other cases this parameter is read from the driver conf. We also
     * read this parameter from the driver conf in E1.5 MF mode since 57711
     * boot code does not have the struct func_ext_cfg.
     */
    if (((pLM->hw_info.mf_info.mf_mode != MULTI_FUNCTION_SI) &&
         (pLM->hw_info.mf_info.mf_mode != MULTI_FUNCTION_AFEX)) ||
        (CHIP_IS_E1x(pLM)))
    {
        pLM->params.l2_fw_flow_ctrl = (pUM->devParams.l2_fw_flow_ctrl) ? 1 : 0;
    }

    pLM->params.rcv_buffer_offset = BNXE_DMA_RX_OFFSET;

    pLM->params.debug_cap_flags = DEFAULT_DEBUG_CAP_FLAGS_VAL;

    pLM->params.max_fcoe_task = lm_fc_max_fcoe_task_sup(pLM);

    /* enable rate shaping */
    pLM->params.cmng_enable = 1;

    pLM->params.validate_sq_complete = 1;

    return LM_STATUS_SUCCESS;
}
static boolean_t BnxeIsBarUsed(um_device_t * pUM,
                               int           regNumber,
                               offset_t      offset,
                               u32_t         size)
{
    BnxeMemRegion * pMem;

    BNXE_LOCK_ENTER_MEM(pUM);

    pMem = (BnxeMemRegion *)d_list_peek_head(&pUM->memRegionList);

    while (pMem)
    {
        if ((pMem->regNumber == regNumber) &&
            (pMem->offset    == offset) &&
            (pMem->size      == size))
        {
            BNXE_LOCK_EXIT_MEM(pUM);
            return B_TRUE;
        }

        pMem = (BnxeMemRegion *)d_list_next_entry(D_LINK_CAST(pMem));
    }

    BNXE_LOCK_EXIT_MEM(pUM);
    return B_FALSE;
}
void * mm_map_io_base(lm_device_t * pLM,
                      lm_address_t  baseAddr,
                      u32_t         size,
                      u8_t          bar)
{
    um_device_t *   pUM = (um_device_t *)pLM;
    BnxeMemRegion * pMem;
    //int numRegs;
    off_t regSize;
    int rc;

    /*
     * Solaris identifies:
     *   BAR 0 - size 0         (pci config regs?)
     *   BAR 1 - size 0x800000  (Everest 1/2 LM BAR 0)
     *   BAR 2 - size 0x4000000 (Everest 1 LM BAR 1)
     *               0x800000   (Everest 2 LM BAR 1)
     *   BAR 3 - size 0x10000   (Everest 2 LM BAR 2)
     */
    bar++;

    //ddi_dev_nregs(pUM->pDev, &numRegs);

    ddi_dev_regsize(pUM->pDev, bar, &regSize);

    if ((size > regSize) || BnxeIsBarUsed(pUM, bar, 0, size))
    {
        BnxeLogWarn(pUM, "BAR %d at offset %d and size %d is already being used!",
                    bar, 0, (int)regSize);
        return NULL;
    }

    if ((pMem = kmem_zalloc(sizeof(BnxeMemRegion), KM_NOSLEEP)) == NULL)
    {
        BnxeLogWarn(pUM, "Memory allocation for BAR %d at offset %d and size %d failed!",
                    bar, 0, (int)regSize);
        return NULL;
    }

    if ((rc = ddi_regs_map_setup(pUM->pDev,
                                 bar,  // bar number
                                 &pMem->pRegAddr,
                                 0,    // region map offset,
                                 size, // region memory window size (0=all)
                                 &bnxeAccessAttribBAR,
                                 &pMem->regAccess)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to memory map device (BAR=%d, offset=%d, size=%d) (%d)",
                    bar, 0, size, rc);
        kmem_free(pMem, sizeof(BnxeMemRegion));
        return NULL;
    }

    pMem->baseAddr  = baseAddr;
    pMem->regNumber = bar;
    pMem->offset    = 0;
    pMem->size      = size;

    BNXE_LOCK_ENTER_MEM(pUM);
    d_list_push_head(&pUM->memRegionList, D_LINK_CAST(pMem));
    BNXE_LOCK_EXIT_MEM(pUM);

    bar--;
    pLM->vars.reg_handle[bar] = pMem->regAccess;

    return pMem->pRegAddr;
}
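
/*
 * Note the reg set indexing above: the LM core numbers its BARs from 0, but
 * on Solaris reg set 0 is the PCI config space tuple, so LM BAR n maps to
 * reg set n+1 (hence the bar++ before ddi_dev_regsize()/ddi_regs_map_setup()
 * and the bar-- before caching the handle in pLM->vars.reg_handle).
 */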
void * mm_map_io_space_solaris(lm_device_t *      pLM,
                               lm_address_t       physAddr,
                               u8_t               bar,
                               u32_t              offset,
                               u32_t              size,
                               ddi_acc_handle_t * pRegAccHandle)
{
    um_device_t *   pUM = (um_device_t *)pLM;
    BnxeMemRegion * pMem;
    off_t regSize;
    int rc;

    /* see bar mapping described in mm_map_io_base above */
    bar++;

    ddi_dev_regsize(pUM->pDev, bar, &regSize);

    if ((size > regSize) || BnxeIsBarUsed(pUM, bar, offset, size))
    {
        BnxeLogWarn(pUM, "BAR %d at offset %d and size %d is already being used!",
                    bar, offset, (int)regSize);
        return NULL;
    }

    if ((pMem = kmem_zalloc(sizeof(BnxeMemRegion), KM_NOSLEEP)) == NULL)
    {
        BnxeLogWarn(pUM, "Memory allocation for BAR %d at offset %d and size %d failed!",
                    bar, offset, (int)regSize);
        return NULL;
    }

    if ((rc = ddi_regs_map_setup(pUM->pDev,
                                 bar,    // bar number
                                 &pMem->pRegAddr,
                                 offset, // region map offset,
                                 size,   // region memory window size (0=all)
                                 &bnxeAccessAttribBAR,
                                 pRegAccHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to memory map device (BAR=%d, offset=%d, size=%d) (%d)",
                    bar, offset, size, rc);
        kmem_free(pMem, sizeof(BnxeMemRegion));
        return NULL;
    }

    pMem->baseAddr  = physAddr;
    pMem->regNumber = bar;
    pMem->offset    = offset;
    pMem->size      = size;
    pMem->regAccess = *pRegAccHandle;

    BNXE_LOCK_ENTER_MEM(pUM);
    d_list_push_head(&pUM->memRegionList, D_LINK_CAST(pMem));
    BNXE_LOCK_EXIT_MEM(pUM);

    return pMem->pRegAddr;
}
void mm_unmap_io_space(lm_device_t * pLM,
                       void *        pVirtAddr,
                       u32_t         size)
{
    um_device_t *   pUM = (um_device_t *)pLM;
    BnxeMemRegion * pMemRegion;

    BNXE_LOCK_ENTER_MEM(pUM);

    pMemRegion = (BnxeMemRegion *)d_list_peek_head(&pUM->memRegionList);

    while (pMemRegion)
    {
        if ((pMemRegion->pRegAddr == pVirtAddr) &&
            (pMemRegion->size     == size))
        {
            d_list_remove_entry(&pUM->memRegionList, D_LINK_CAST(pMemRegion));
            ddi_regs_map_free(&pMemRegion->regAccess);
            kmem_free(pMemRegion, sizeof(BnxeMemRegion));
            break;
        }

        pMemRegion = (BnxeMemRegion *)d_list_next_entry(D_LINK_CAST(pMemRegion));
    }

    BNXE_LOCK_EXIT_MEM(pUM);
}
void * mm_alloc_mem_imp(lm_device_t *       pLM,
                        u32_t               memSize,
                        const char *        sz_file,
                        const unsigned long line,
                        u8_t                cli_idx)
{
    um_device_t *  pUM = (um_device_t *)pLM;
    BnxeMemBlock * pMem;
    void *         pBuf;
    u32_t *        pTmp;
    int i;

    (void)cli_idx;

    if ((pMem = kmem_zalloc(sizeof(BnxeMemBlock), KM_NOSLEEP)) == NULL)
    {
        return NULL;
    }

    /* allocate space for header/trailer checks */
    memSize += (BNXE_MEM_CHECK_LEN * 2);

    MEM_LOG(pUM, "*** MEM: %8u", memSize);

    if ((pBuf = kmem_zalloc(memSize, KM_NOSLEEP)) == NULL)
    {
        BnxeLogWarn(pUM, "Failed to allocate memory");
        kmem_free(pMem, sizeof(BnxeMemBlock));
        return NULL;
    }

    /* fill in the header check */
    for (i = 0, pTmp = (u32_t *)pBuf;
         i < BNXE_MEM_CHECK_LEN;
         i += 4, pTmp++)
    {
        *pTmp = BNXE_MAGIC;
    }

    /* fill in the trailer check */
    for (i = 0, pTmp = (u32_t *)((char *)pBuf + memSize - BNXE_MEM_CHECK_LEN);
         i < BNXE_MEM_CHECK_LEN;
         i += 4, pTmp++)
    {
        *pTmp = BNXE_MAGIC;
    }

    pMem->size = memSize;
    pMem->pBuf = pBuf;
    snprintf(pMem->fileName, sizeof(pMem->fileName), "%s", sz_file);
    pMem->fileLine = line;

    BNXE_LOCK_ENTER_MEM(pUM);
    d_list_push_head(&pUM->memBlockList, D_LINK_CAST(pMem));
    BNXE_LOCK_EXIT_MEM(pUM);

    MEM_LOG(pUM, "Allocated %d byte block virt:%p",
            memSize, ((char *)pBuf + BNXE_MEM_CHECK_LEN));

    return ((char *)pBuf + BNXE_MEM_CHECK_LEN);
}
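
/*
 * Resulting block layout (the caller sees only the middle region):
 *
 *   +--------------------+----------------+--------------------+
 *   | BNXE_MEM_CHECK_LEN | memSize bytes  | BNXE_MEM_CHECK_LEN |
 *   | of BNXE_MAGIC      | (returned ptr) | of BNXE_MAGIC      |
 *   +--------------------+----------------+--------------------+
 *
 * mm_rt_free_mem() below walks both guard regions and calls BnxeDbgBreak()
 * if any word no longer matches BNXE_MAGIC, catching under/overflows.
 */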
void * mm_alloc_phys_mem_align_imp(lm_device_t *       pLM,
                                   u32_t               memSize,
                                   lm_address_t *      pPhysAddr,
                                   u32_t               alignment,
                                   u8_t                memType,
                                   const char *        sz_file,
                                   const unsigned long line,
                                   u8_t                cli_idx)
{
    um_device_t * pUM = (um_device_t *)pLM;
    int rc;
    caddr_t pBuf;
    size_t length;
    unsigned int count;
    ddi_dma_attr_t     dmaAttrib;
    ddi_dma_handle_t * pDmaHandle;
    ddi_acc_handle_t * pDmaAccHandle;
    ddi_dma_cookie_t   cookie;
    BnxeMemDma * pMem;
    size_t size;

    (void)memType;
    (void)cli_idx;

    if (memSize == 0)
    {
        return NULL;
    }

    if ((pMem = kmem_zalloc(sizeof(BnxeMemDma), KM_NOSLEEP)) == NULL)
    {
        return NULL;
    }

    dmaAttrib                = bnxeDmaPageAttrib;
    dmaAttrib.dma_attr_align = alignment;

    pDmaHandle    = &pMem->dmaHandle;
    pDmaAccHandle = &pMem->dmaAccHandle;

    size  = memSize;
    size += (alignment - 1);
    size &= ~((u32_t)(alignment - 1));

    MEM_LOG(pUM, "*** DMA: %8u (%4d) - %8u", memSize, alignment, size);

    if ((rc = ddi_dma_alloc_handle(pUM->pDev,
                                   &dmaAttrib,
                                   DDI_DMA_DONTWAIT,
                                   (void *)0,
                                   pDmaHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to alloc DMA handle");
        kmem_free(pMem, sizeof(BnxeMemDma));
        return NULL;
    }

    if ((rc = ddi_dma_mem_alloc(*pDmaHandle,
                                size,
                                &bnxeAccessAttribBUF,
                                DDI_DMA_CONSISTENT,
                                DDI_DMA_DONTWAIT,
                                (void *)0,
                                &pBuf,
                                &length,
                                pDmaAccHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to alloc DMA memory");
        ddi_dma_free_handle(pDmaHandle);
        kmem_free(pMem, sizeof(BnxeMemDma));
        return NULL;
    }

    if ((rc = ddi_dma_addr_bind_handle(*pDmaHandle,
                                       (struct as *)0,
                                       pBuf,
                                       length,
                                       DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
                                       DDI_DMA_DONTWAIT,
                                       (void *)0,
                                       &cookie,
                                       &count)) != DDI_DMA_MAPPED)
    {
        BnxeLogWarn(pUM, "Failed to bind DMA address");
        ddi_dma_mem_free(pDmaAccHandle);
        ddi_dma_free_handle(pDmaHandle);
        kmem_free(pMem, sizeof(BnxeMemDma));
        return NULL;
    }

    pPhysAddr->as_u64 = cookie.dmac_laddress;

    /* save the virtual memory address so we can get the dma_handle later */
    pMem->size     = memSize;
    pMem->pDmaVirt = pBuf;
    pMem->physAddr = *pPhysAddr;
    snprintf(pMem->fileName, sizeof(pMem->fileName), "%s", sz_file);
    pMem->fileLine = line;

#if 0
    MEM_LOG(pUM, "*** DMA: virt %p / phys 0x%0llx (%d/%d)",
            pBuf, pPhysAddr->as_u64,
            (!((u32_t)pBuf % (u32_t)alignment)) ? 1 : 0,
            (!((u32_t)pPhysAddr->as_ptr % (u32_t)alignment) ? 1 : 0));
#endif

    BNXE_LOCK_ENTER_MEM(pUM);
    d_list_push_head(&pUM->memDmaList, D_LINK_CAST(pMem));
    BNXE_LOCK_EXIT_MEM(pUM);

    MEM_LOG(pUM, "Allocated %d sized DMA block phys:%p virt:%p",
            memSize, pMem->physAddr.as_ptr, pMem->pDmaVirt);

    /* Zero memory! */
    bzero(pBuf, length);

    /* make sure the new contents are flushed back to main memory */
    ddi_dma_sync(*pDmaHandle, 0, length, DDI_DMA_SYNC_FORDEV);

    return pBuf;
}
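
/*
 * The allocation above follows the standard three-step DDI DMA sequence:
 * ddi_dma_alloc_handle() -> ddi_dma_mem_alloc() -> ddi_dma_addr_bind_handle().
 * Because bnxeDmaPageAttrib.dma_attr_sgllen is 1, the bind yields exactly one
 * cookie, whose dmac_laddress becomes the device-visible physical address.
 * mm_rt_free_phys_mem() tears the mapping down in the reverse order.
 */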
void * mm_alloc_phys_mem_imp(lm_device_t *       pLM,
                             u32_t               memSize,
                             lm_address_t *      pPhysAddr,
                             u8_t                memType,
                             const char *        sz_file,
                             const unsigned long line,
                             u8_t                cli_idx)
{
    return mm_alloc_phys_mem_align_imp(pLM, memSize, pPhysAddr,
                                       BNXE_DMA_ALIGNMENT, memType,
                                       sz_file, line, cli_idx);
}


void * mm_rt_alloc_mem_imp(lm_device_t *       pDev,
                           u32_t               memSize,
                           const char *        sz_file,
                           const unsigned long line,
                           u8_t                cli_idx)
{
    return mm_alloc_mem_imp(pDev, memSize, sz_file, line, cli_idx);
}


void * mm_rt_alloc_phys_mem_imp(lm_device_t *       pDev,
                                u32_t               memSize,
                                lm_address_t *      pPhysAddr,
                                u8_t                flushType,
                                const char *        sz_file,
                                const unsigned long line,
                                u8_t                cli_idx)
{
    return mm_alloc_phys_mem_imp(pDev, memSize, pPhysAddr, flushType,
                                 sz_file, line, cli_idx);
}


u64_t mm_get_current_time(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BnxeDbgBreakMsg(pUM, "MM_GET_CURRENT_TIME");

    return 0;
}
void mm_rt_free_mem(lm_device_t * pDev,
                    void *        pBuf,
                    u32_t         memSize,
                    u8_t          cli_idx)
{
    um_device_t *  pUM = (um_device_t *)pDev;
    BnxeMemBlock * pMem;
    u32_t *        pTmp;
    int i;

    (void)cli_idx;

    BNXE_LOCK_ENTER_MEM(pUM);

    pMem = (BnxeMemBlock *)d_list_peek_head(&pUM->memBlockList);

    /* adjust for header/trailer checks */
    pBuf = ((char *)pBuf - BNXE_MEM_CHECK_LEN);
    memSize += (BNXE_MEM_CHECK_LEN * 2);

    /* verify header check */
    for (i = 0, pTmp = (u32_t *)pBuf;
         i < BNXE_MEM_CHECK_LEN;
         i += 4, pTmp++)
    {
        if (*pTmp != BNXE_MAGIC)
        {
            BnxeLogWarn(pUM, "Header overflow! (%p/%u)", pBuf, memSize);
            BnxeDbgBreak(pUM);
        }
    }

    /* verify trailer check */
    for (i = 0, pTmp = (u32_t *)((char *)pBuf + memSize - BNXE_MEM_CHECK_LEN);
         i < BNXE_MEM_CHECK_LEN;
         i += 4, pTmp++)
    {
        if (*pTmp != BNXE_MAGIC)
        {
            BnxeLogWarn(pUM, "Trailer overflow! (%p/%u)", pBuf, memSize);
            BnxeDbgBreak(pUM);
        }
    }

    while (pMem)
    {
        if (pBuf == pMem->pBuf)
        {
            if (memSize != pMem->size)
            {
                /* Uh-Oh! */
                BnxeLogWarn(pUM, "Attempt to free memory block with invalid size (%d/%d)",
                            memSize, pMem->size);
                BnxeDbgBreak(pUM);

                BNXE_LOCK_EXIT_MEM(pUM);
                return;
            }

            d_list_remove_entry(&pUM->memBlockList, D_LINK_CAST(pMem));

            kmem_free(pBuf, memSize);
            kmem_free(pMem, sizeof(BnxeMemBlock));

            BNXE_LOCK_EXIT_MEM(pUM);
            return;
        }

        pMem = (BnxeMemBlock *)d_list_next_entry(D_LINK_CAST(pMem));
    }

    BNXE_LOCK_EXIT_MEM(pUM);
}
void mm_rt_free_phys_mem(lm_device_t * pDev,
                         u32_t         memSize,
                         void *        pBuf,
                         lm_address_t  pPhysAddr,
                         u8_t          cli_idx)
{
    um_device_t * pUM = (um_device_t *)pDev;
    BnxeMemDma * pMem;

    (void)pPhysAddr;
    (void)cli_idx;

    BNXE_LOCK_ENTER_MEM(pUM);

    pMem = (BnxeMemDma *)d_list_peek_head(&pUM->memDmaList);

    while (pMem)
    {
        if (pBuf == pMem->pDmaVirt)
        {
            if (memSize != pMem->size)
            {
                /* Uh-Oh! */
                BnxeLogWarn(pUM, "Attempt to free DMA memory with invalid size (%d/%d)",
                            memSize, pMem->size);
                BnxeDbgBreak(pUM);

                BNXE_LOCK_EXIT_MEM(pUM);
                return;
            }

            d_list_remove_entry(&pUM->memDmaList, D_LINK_CAST(pMem));

            ddi_dma_unbind_handle(pMem->dmaHandle);
            ddi_dma_mem_free(&pMem->dmaAccHandle);
            ddi_dma_free_handle(&pMem->dmaHandle);
            kmem_free(pMem, sizeof(BnxeMemDma));

            BNXE_LOCK_EXIT_MEM(pUM);
            return;
        }

        pMem = (BnxeMemDma *)d_list_next_entry(D_LINK_CAST(pMem));
    }

    BNXE_LOCK_EXIT_MEM(pUM);
}
void mm_memset(void * pBuf,
               u8_t   val,
               u32_t  memSize)
{
    memset(pBuf, val, memSize);
}


void mm_memcpy(void *       pDest,
               const void * pSrc,
               u32_t        memSize)
{
    memcpy(pDest, pSrc, memSize);
}


u8_t mm_memcmp(void * pBuf1,
               void * pBuf2,
               u32_t  count)
{
    return (memcmp(pBuf1, pBuf2, count) == 0) ? 1 : 0;
}
void mm_indicate_tx(lm_device_t * pLM,
                    u32_t         idx,
                    s_list_t *    packet_list)
{
    BnxeTxPktsReclaim((um_device_t *)pLM, idx, packet_list);
}


void mm_set_done(lm_device_t * pDev,
                 u32_t         cid,
                 void *        cookie)
{
#if 0
    um_device_t * pUM = (um_device_t *)pDev;
    BnxeLogInfo(pUM, "RAMROD on cid %d cmd is done", cid);
#else
    (void)pDev;
    (void)cid;
#endif
}


void mm_return_sq_pending_command(lm_device_t *               pDev,
                                  struct sq_pending_command * pPending)
{
    /* XXX probably need a memory pool to pull from... */
    mm_rt_free_mem(pDev, pPending, sizeof(struct sq_pending_command),
                   LM_CLI_IDX_NDIS);
}


struct sq_pending_command * mm_get_sq_pending_command(lm_device_t * pDev)
{
    /* XXX probably need a memory pool to pull from... */
    return mm_rt_alloc_mem(pDev, sizeof(struct sq_pending_command),
                           LM_CLI_IDX_NDIS);
}
u32_t mm_copy_packet_buf(lm_device_t * pDev,
                         lm_packet_t * pLMPkt,
                         u8_t *        pMemBuf,
                         u32_t         size)
{
    //um_device_t *  pUM = (um_device_t *)pDev;
    um_txpacket_t * pTxPkt = (um_txpacket_t *)pLMPkt;
    mblk_t * pMblk;
    u32_t copied;
    u32_t mblkDataLen;
    u32_t toCopy;

    pMblk  = pTxPkt->pMblk;
    copied = 0;

    while (size && pMblk)
    {
        mblkDataLen = (pMblk->b_wptr - pMblk->b_rptr);
        toCopy = (mblkDataLen <= size) ? mblkDataLen : size;

        bcopy(pMblk->b_rptr, pMemBuf, toCopy);

        pMemBuf += toCopy;
        copied  += toCopy;
        size    -= toCopy;

        pMblk = pMblk->b_cont;
    }

    return copied;
}
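
/*
 * Worked example: for an mblk chain of 60 + 1400 bytes and size == 1000,
 * the loop copies all 60 bytes from the first mblk, then 940 bytes from the
 * second, and returns copied == 1000.  If the chain is shorter than size,
 * the loop ends at the last mblk and returns the smaller byte count.
 */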
lm_status_t mm_fan_failure(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BnxeLogWarn(pUM, "FAN FAILURE!");

    return LM_STATUS_SUCCESS;
}
static void BnxeLinkStatus(um_device_t * pUM,
                           lm_status_t   link,
                           lm_medium_t   medium)
{
#define TBUF_SIZE 64
    char tbuf[TBUF_SIZE];
    char * pDuplex;
    char * pRxFlow;
    char * pTxFlow;
    char * pSpeed;

    if (link != LM_STATUS_LINK_ACTIVE)
    {
        /* reset the link status */
        pUM->props.link_speed   = 0;
        pUM->props.link_duplex  = B_FALSE;
        pUM->props.link_txpause = B_FALSE;
        pUM->props.link_rxpause = B_FALSE;
        pUM->props.uptime       = 0;

        /* reset the link partner status */
        pUM->remote.link_autoneg   = B_FALSE;
        pUM->remote.param_20000fdx = B_FALSE;
        pUM->remote.param_10000fdx = B_FALSE;
        pUM->remote.param_2500fdx  = B_FALSE;
        pUM->remote.param_1000fdx  = B_FALSE;
        pUM->remote.param_100fdx   = B_FALSE;
        pUM->remote.param_100hdx   = B_FALSE;
        pUM->remote.param_10fdx    = B_FALSE;
        pUM->remote.param_10hdx    = B_FALSE;
        pUM->remote.param_txpause  = B_FALSE;
        pUM->remote.param_rxpause  = B_FALSE;

        BnxeLogInfo(pUM, "Link Down");
        return;
    }

    pUM->props.uptime = ddi_get_time();

    if (GET_MEDIUM_DUPLEX(medium) == LM_MEDIUM_HALF_DUPLEX)
    {
        pDuplex = "Half";
        pUM->props.link_duplex = B_FALSE;
    }
    else
    {
        pDuplex = "Full";
        pUM->props.link_duplex = B_TRUE;
    }

    if (pUM->lm_dev.vars.flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE)
    {
        pRxFlow = "ON";
        pUM->props.link_rxpause = B_TRUE;
    }
    else
    {
        pRxFlow = "OFF";
        pUM->props.link_rxpause = B_FALSE;
    }

    if (pUM->lm_dev.vars.flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
    {
        pTxFlow = "ON";
        pUM->props.link_txpause = B_TRUE;
    }
    else
    {
        pTxFlow = "OFF";
        pUM->props.link_txpause = B_FALSE;
    }

#if 0
    if (pUM->curcfg.lnkcfg.link_autoneg == B_TRUE)
    {
        BnxeUpdateLpCap(pUM);
    }
#endif

    switch (GET_MEDIUM_SPEED(medium))
    {
    case LM_MEDIUM_SPEED_10MBPS:

        pUM->props.link_speed = 10;
        pSpeed = "10Mb";
        break;

    case LM_MEDIUM_SPEED_100MBPS:

        pUM->props.link_speed = 100;
        pSpeed = "100Mb";
        break;

    case LM_MEDIUM_SPEED_1000MBPS:

        pUM->props.link_speed = 1000;
        pSpeed = "1Gb";
        break;

    case LM_MEDIUM_SPEED_2500MBPS:

        pUM->props.link_speed = 2500;
        pSpeed = "2.5Gb";
        break;

    case LM_MEDIUM_SPEED_10GBPS:

        pUM->props.link_speed = 10000;
        pSpeed = "10Gb";
        break;

    case LM_MEDIUM_SPEED_12GBPS:

        pUM->props.link_speed = 12000;
        pSpeed = "12Gb";
        break;

    case LM_MEDIUM_SPEED_12_5GBPS:

        pUM->props.link_speed = 12500;
        pSpeed = "12.5Gb";
        break;

    case LM_MEDIUM_SPEED_13GBPS:

        pUM->props.link_speed = 13000;
        pSpeed = "13Gb";
        break;

    case LM_MEDIUM_SPEED_15GBPS:

        pUM->props.link_speed = 15000;
        pSpeed = "15Gb";
        break;

    case LM_MEDIUM_SPEED_16GBPS:

        pUM->props.link_speed = 16000;
        pSpeed = "16Gb";
        break;

    case LM_MEDIUM_SPEED_20GBPS:

        pUM->props.link_speed = 20000;
        pSpeed = "20Gb";
        break;

    default:

        if ((GET_MEDIUM_SPEED(medium) >= LM_MEDIUM_SPEED_SEQ_START) &&
            (GET_MEDIUM_SPEED(medium) <= LM_MEDIUM_SPEED_SEQ_END))
        {
            pUM->props.link_speed = (((GET_MEDIUM_SPEED(medium) >> 8) -
                                      (LM_MEDIUM_SPEED_SEQ_START >> 8) +
                                      1) * 100);
            snprintf(tbuf, TBUF_SIZE, "%u", pUM->props.link_speed);
            pSpeed = tbuf;
            break;
        }

        pUM->props.link_speed = 0;
        pSpeed = "";
        break;
    }

    if (*pSpeed == 0)
    {
        BnxeLogInfo(pUM, "%s Duplex Rx Flow %s Tx Flow %s Link Up",
                    pDuplex, pRxFlow, pTxFlow);
    }
    else
    {
        BnxeLogInfo(pUM, "%s %s Duplex Rx Flow %s Tx Flow %s Link Up",
                    pSpeed, pDuplex, pRxFlow, pTxFlow);
    }
}
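
/*
 * Note on the default case above: "sequence" speeds are encoded in 100Mb
 * steps starting at LM_MEDIUM_SPEED_SEQ_START, with the step index held in
 * bits 8 and up.  So the first sequence value decodes to 100 (Mb/s), the
 * second to 200, and so on; the decoded number is printed directly since
 * there is no fixed label for it.
 */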
void mm_indicate_link(lm_device_t * pLM,
                      lm_status_t   link,
                      lm_medium_t   medium)
{
    um_device_t * pUM = (um_device_t *)pLM;

    /* ignore link status if it has not changed since the last indicate */
    if ((pUM->devParams.lastIndLink   == link) &&
        (pUM->devParams.lastIndMedium == medium))
    {
        return;
    }

    pUM->devParams.lastIndLink   = link;
    pUM->devParams.lastIndMedium = medium;

    BnxeLinkStatus(pUM, link, medium);

    if (CLIENT_BOUND(pUM, LM_CLI_IDX_NDIS))
    {
        BnxeGldLink(pUM, (link == LM_STATUS_LINK_ACTIVE) ?
                             LINK_STATE_UP : LINK_STATE_DOWN);
    }

    if (CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE))
    {
        if (pUM->fcoe.pDev == NULL)
        {
            BnxeLogWarn(pUM, "FCoE Client bound and pDev is NULL (LINK STATUS failed!) %s@%s",
                        BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
        }
        else if (pUM->fcoe.bind.cliCtl == NULL)
        {
            BnxeLogWarn(pUM, "FCoE Client bound and cliCtl is NULL (LINK STATUS failed!) %s@%s",
                        BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
        }
        else
        {
            pUM->fcoe.bind.cliCtl(pUM->fcoe.pDev,
                                  (link == LM_STATUS_LINK_ACTIVE) ?
                                      CLI_CTL_LINK_UP : CLI_CTL_LINK_DOWN,
                                  NULL,
                                  0);
        }
    }
}
lm_status_t mm_schedule_task(lm_device_t * pDev,
                             u32_t         delay_ms,
                             lm_task_cb_t  task,
                             void *        param)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BnxeWorkQueueAddDelayNoCopy(pUM, (void (*)(um_device_t *, void *))task,
                                param, delay_ms);

    return LM_STATUS_SUCCESS;
}


lm_status_t mm_register_lpme(lm_device_t *                  pDev,
                             lm_generic_workitem_function * func,
                             u8_t                           b_fw_access,
                             u8_t                           b_queue_for_fw)
{
    um_device_t * pUM = (um_device_t *)pDev;

    (void)b_fw_access;
    (void)b_queue_for_fw;

    BnxeWorkQueueAddGeneric(pUM, (void (*)(um_device_t *))func);

    return LM_STATUS_SUCCESS;
}
void MM_ACQUIRE_SPQ_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_SPQ((um_device_t *)pDev);
}


void MM_RELEASE_SPQ_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_SPQ((um_device_t *)pDev);
}


void MM_ACQUIRE_SPQ_LOCK_DPC_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_SPQ((um_device_t *)pDev);
}


void MM_RELEASE_SPQ_LOCK_DPC_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_SPQ((um_device_t *)pDev);
}


void MM_ACQUIRE_CID_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_CID((um_device_t *)pDev);
}


void MM_RELEASE_CID_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_CID((um_device_t *)pDev);
}


void MM_ACQUIRE_REQUEST_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_RRREQ((um_device_t *)pDev);
}


void MM_RELEASE_REQUEST_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_RRREQ((um_device_t *)pDev);
}


void MM_ACQUIRE_PHY_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_PHY((um_device_t *)pDev);
}


void MM_RELEASE_PHY_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_PHY((um_device_t *)pDev);
}


void MM_ACQUIRE_PHY_LOCK_DPC_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_PHY((um_device_t *)pDev);
}


void MM_RELEASE_PHY_LOCK_DPC_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_PHY((um_device_t *)pDev);
}
void mm_init_lock(lm_device_t *    pDev,
                  mm_spin_lock_t * spinlock)
{
    um_device_t * pUM = (um_device_t *)pDev;

    mutex_init(spinlock, NULL,
               MUTEX_DRIVER, DDI_INTR_PRI(pUM->intrPriority));
}


lm_status_t mm_acquire_lock(mm_spin_lock_t * spinlock)
{
    if (spinlock == NULL)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    mutex_enter(spinlock);

    return LM_STATUS_SUCCESS;
}


lm_status_t mm_release_lock(mm_spin_lock_t * spinlock)
{
    if (spinlock == NULL)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    mutex_exit(spinlock);

    return LM_STATUS_SUCCESS;
}
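
/*
 * mm_spin_lock_t is backed by a kmutex_t here.  Because mm_init_lock()
 * passes DDI_INTR_PRI(pUM->intrPriority), these mutexes are safe to take
 * from the driver's interrupt handlers as well as from base level.
 */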
void MM_ACQUIRE_MCP_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_MCP((um_device_t *)pDev);
}


void MM_RELEASE_MCP_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_MCP((um_device_t *)pDev);
}


void MM_ACQUIRE_ISLES_CONTROL_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_ISLES_CONTROL((um_device_t *)pDev);
}


void MM_RELEASE_ISLES_CONTROL_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_ISLES_CONTROL((um_device_t *)pDev);
}


void MM_ACQUIRE_ISLES_CONTROL_LOCK_DPC_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_ISLES_CONTROL((um_device_t *)pDev);
}


void MM_RELEASE_ISLES_CONTROL_LOCK_DPC_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_ISLES_CONTROL((um_device_t *)pDev);
}


void MM_ACQUIRE_IND_REG_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_IND((um_device_t *)pDev);
}


void MM_RELEASE_IND_REG_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_IND((um_device_t *)pDev);
}


void MM_ACQUIRE_LOADER_LOCK_IMP()
{
    mutex_enter(&bnxeLoaderMutex);
}


void MM_RELEASE_LOADER_LOCK_IMP()
{
    mutex_exit(&bnxeLoaderMutex);
}


void MM_ACQUIRE_SP_REQ_MGR_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_SPREQ((um_device_t *)pDev);
}


void MM_RELEASE_SP_REQ_MGR_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_SPREQ((um_device_t *)pDev);
}


void MM_ACQUIRE_SB_LOCK_IMP(lm_device_t * pDev, u8_t sb_idx)
{
    BNXE_LOCK_ENTER_SB((um_device_t *)pDev, sb_idx);
}


void MM_RELEASE_SB_LOCK_IMP(lm_device_t * pDev, u8_t sb_idx)
{
    BNXE_LOCK_EXIT_SB((um_device_t *)pDev, sb_idx);
}


void MM_ACQUIRE_ETH_CON_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_ENTER_ETH_CON((um_device_t *)pDev);
}


void MM_RELEASE_ETH_CON_LOCK_IMP(lm_device_t * pDev)
{
    BNXE_LOCK_EXIT_ETH_CON((um_device_t *)pDev);
}
unsigned int mm_crc32(unsigned char * address,
                      unsigned int    size,
                      unsigned int    crc)
{
    return 0;
}


unsigned short mm_crc16(unsigned char * address,
                        unsigned int    size,
                        unsigned short  crc)
{
    return 0;
}
lm_status_t mm_event_log_generic_arg_fwd(lm_device_t *     pDev,
                                         const lm_log_id_t lm_log_id,
                                         va_list           argp)
{
    um_device_t * pUM = (um_device_t *)pDev;
    u8_t   port = 0;
    char * sz_vendor_name = NULL;
    char * sz_vendor_pn   = NULL;

    switch (lm_log_id)
    {
    case LM_LOG_ID_FAN_FAILURE: /* fan failure detected */

        BnxeLogWarn(pUM, "FAN FAILURE!");
        break;

    case LM_LOG_ID_UNQUAL_IO_MODULE: /* SFP+ unqualified io module */

        /*
         * expected parameters:
         *   u8 port, const char * vendor_name, const char * vendor_pn
         */
        port           = va_arg(argp, int);
        sz_vendor_name = va_arg(argp, char *);
        sz_vendor_pn   = va_arg(argp, char *);

        BnxeLogInfo(pUM, "Unqualified IO Module: %s %s (port=%d)",
                    sz_vendor_name, sz_vendor_pn, port);
        break;

    case LM_LOG_ID_OVER_CURRENT: /* SFP+ over current power */

        /*
         * expected parameters:
         *   u8 port
         */
        port = va_arg(argp, int);

        BnxeLogWarn(pUM, "SFP+ over current, power failure! (port=%d)", port);
        break;

    case LM_LOG_ID_NO_10G_SUPPORT: /* 10Gb speed requested but not supported */

        /*
         * expected parameters:
         *   u8 port
         */
        port = va_arg(argp, int);

        BnxeLogWarn(pUM, "10Gb speed not supported! (port=%d)", port);
        break;

    case LM_LOG_ID_PHY_UNINITIALIZED:

        /*
         * expected parameters:
         *   u8 port
         */
        port = va_arg(argp, int);

        BnxeLogWarn(pUM, "PHY uninitialized! (port=%d)", port);
        break;

    case LM_LOG_ID_MDIO_ACCESS_TIMEOUT:

#define MM_PORT_NUM(pdev)                               \
        (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4) ? \
            (PATH_ID(pdev) + (2 * PORT_ID(pdev)))     : \
            (PATH_ID(pdev) + PORT_ID(pdev))

        port = MM_PORT_NUM(&pUM->lm_dev);

        BnxeLogWarn(pUM, "MDIO access timeout! (port=%d)", port);
        break;

    default:

        BnxeLogWarn(pUM, "Unknown MM event log! (type=%d)", lm_log_id);
        break;
    }

    return LM_STATUS_SUCCESS;
}


lm_status_t mm_event_log_generic(lm_device_t *     pDev,
                                 const lm_log_id_t lm_log_id,
                                 ...)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    va_list argp;

    va_start(argp, lm_log_id);
    lm_status = mm_event_log_generic_arg_fwd(pDev, lm_log_id, argp);
    va_end(argp);

    return lm_status;
}
u32_t mm_build_ver_string(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    snprintf((char *)pDev->ver_str,
             sizeof(pDev->ver_str),
             "%s",
             pUM->version);

    return min(strlen((char *)pDev->ver_str), strlen(pUM->version));
}


void mm_indicate_hw_failure(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BnxeLogWarn(pUM, "HW failure indicated!");
}
void mm_bar_read_byte(struct _lm_device_t *pdev,
                      u8_t  bar,
                      u32_t offset,
                      u8_t  *ret)
{
    mm_read_barrier();
    *ret = ddi_get8(pdev->vars.reg_handle[bar],
                    (uint8_t *)((caddr_t)pdev->vars.mapped_bar_addr[bar] +
                                offset));
}


void mm_bar_read_word(struct _lm_device_t *pdev,
                      u8_t  bar,
                      u32_t offset,
                      u16_t *ret)
{
    mm_read_barrier();
    *ret = ddi_get16(pdev->vars.reg_handle[bar],
                     (uint16_t *)((caddr_t)pdev->vars.mapped_bar_addr[bar] +
                                  offset));
}


void mm_bar_read_dword(struct _lm_device_t *pdev,
                       u8_t  bar,
                       u32_t offset,
                       u32_t *ret)
{
    mm_read_barrier();
    *ret = ddi_get32(pdev->vars.reg_handle[bar],
                     (uint32_t *)((caddr_t)pdev->vars.mapped_bar_addr[bar] +
                                  offset));
}


void mm_bar_read_ddword(struct _lm_device_t *pdev,
                        u8_t  bar,
                        u32_t offset,
                        u64_t *ret)
{
    mm_read_barrier();
    *ret = ddi_get64(pdev->vars.reg_handle[bar],
                     (uint64_t *)((caddr_t)pdev->vars.mapped_bar_addr[bar] +
                                  offset));
}


void mm_bar_write_byte(struct _lm_device_t *pdev,
                       u8_t  bar,
                       u32_t offset,
                       u8_t  val)
{
    ddi_put8(pdev->vars.reg_handle[bar],
             (uint8_t *)((caddr_t)pdev->vars.mapped_bar_addr[bar] + offset),
             val);
    mm_write_barrier();
}


void mm_bar_write_word(struct _lm_device_t *pdev,
                       u8_t  bar,
                       u32_t offset,
                       u16_t val)
{
    ddi_put16(pdev->vars.reg_handle[bar],
              (uint16_t *)((caddr_t)pdev->vars.mapped_bar_addr[bar] + offset),
              val);
    mm_write_barrier();
}


void mm_bar_write_dword(struct _lm_device_t *pdev,
                        u8_t  bar,
                        u32_t offset,
                        u32_t val)
{
    ddi_put32(pdev->vars.reg_handle[bar],
              (uint32_t *)((caddr_t)pdev->vars.mapped_bar_addr[bar] + offset),
              val);
    mm_write_barrier();
}


void mm_bar_write_ddword(struct _lm_device_t *pdev,
                         u8_t  bar,
                         u32_t offset,
                         u64_t val)
{
    ddi_put64(pdev->vars.reg_handle[bar],
              (uint64_t *)((caddr_t)pdev->vars.mapped_bar_addr[bar] + offset),
              val);
    mm_write_barrier();
}


void mm_bar_copy_buffer(struct _lm_device_t * pdev,
                        u8_t  bar,
                        u32_t offset,
                        u32_t size,
                        u32_t *buf_ptr)
{
    u32_t i;

    for (i = 0; i < size; i++)
    {
        ddi_put32(pdev->vars.reg_handle[bar],
                  (uint32_t *)((caddr_t)pdev->vars.mapped_bar_addr[bar] +
                               offset + (i * 4)),
                  *(buf_ptr + i));
    }
}
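
/*
 * Note: mm_bar_copy_buffer()'s size argument counts 32-bit words, not
 * bytes; each iteration writes one dword at (offset + i * 4).
 */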
u32_t mm_get_cap_offset(struct _lm_device_t * pdev,
                        u32_t                 capabilityID)
{
    u32_t cap_offset = PCI_CAPABILITY_LIST; /* CapPtr offset */
    u8_t  cap_id;
    u32_t reg_value = 0;

    lm_status_t lm_status = mm_read_pci(pdev, cap_offset, &reg_value);
    if ((lm_status == LM_STATUS_SUCCESS) && (reg_value != 0xFFFFFFFF)) {
        cap_offset = (u8_t)(reg_value & 0x000000FF);
        if ((cap_offset == 0) || (cap_offset >= 0x100)) {
            return 0xFFFFFFFF;
        }
    } else {
        return 0xFFFFFFFF;
    }

    do {
        reg_value = 0;
        lm_status = mm_read_pci(pdev, cap_offset, &reg_value);
        if ((lm_status == LM_STATUS_SUCCESS) && (reg_value != 0xFFFFFFFF)) {
            cap_id = (u8_t)(reg_value & 0x000000FF);
            if (cap_id == capabilityID) {
                break;
            }
            cap_offset = (reg_value & 0x0000FF00) >> 8;
            if (cap_offset == 0) {
                break;
            }
        } else {
            cap_offset = 0xFFFFFFFF;
            break;
        }
    } while (lm_status == LM_STATUS_SUCCESS);

    return cap_offset;
}
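
#if 0
/*
 * Example (hypothetical caller, not part of the driver): locate the MSI-X
 * capability (capability ID 0x11) and read the dword containing its Message
 * Control word via mm_read_pci().  A return of 0xFFFFFFFF means the
 * capability list walk failed or the ID was not found.
 */
{
    u32_t cap = mm_get_cap_offset(pdev, 0x11);

    if (cap != 0xFFFFFFFF) {
        u32_t val;
        (void)mm_read_pci(pdev, cap, &val); /* ID, next ptr, msg ctrl */
    }
}
#endif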
u32_t mm_get_wol_flags(struct _lm_device_t * pdev)
{
    return LM_WAKE_UP_MODE_NONE;
}


u32_t mm_get_feature_flags(struct _lm_device_t * pdev)
{
    return 0;
}


u32_t mm_get_vmq_cnt(struct _lm_device_t * pdev)
{
    return 0;
}


lm_status_t mm_i2c_update(struct _lm_device_t * pdev)
{
    return LM_STATUS_SUCCESS;
}


u64_t mm_query_system_time(void)
{
    return 0;
}