9506 Want support for QLogic QL41000/45000 series devices
unleashed.git: usr/src/uts/common/io/qede/qede_osal.c

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at available
 * at http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "qede.h"
#include <sys/pci.h>
#include <sys/pcie.h>

extern ddi_dma_attr_t qede_gen_buf_dma_attr;
extern struct ddi_device_acc_attr qede_desc_acc_attr;

/*
 * Find the dma_handle corresponding to the tx, rx data structures
 */
int
qede_osal_find_dma_handle_for_block(qede_t *qede, void *addr,
    ddi_dma_handle_t *dma_handle)
{
        qede_phys_mem_entry_t *entry;
        int ret = DDI_FAILURE;

        mutex_enter(&qede->phys_mem_list.lock);
        QEDE_LIST_FOR_EACH_ENTRY(entry,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            &qede->phys_mem_list.head,
            qede_phys_mem_entry_t,
            list_entry) {
                if (entry->paddr == addr) {
                        *dma_handle = entry->dma_handle;
                        ret = DDI_SUCCESS;
                        break;
                }
        }
        mutex_exit(&qede->phys_mem_list.lock);

        return (ret);
}

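/*
 * Sync the DMA memory block that backs `addr' by looking up its handle
 * on the phys_mem_list: sync for the device before a transfer
 * (is_post == false) and for the kernel afterwards.
 */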
void
qede_osal_dma_sync(struct ecore_dev *edev, void *addr, u32 size, bool is_post)
{
        qede_t *qede = (qede_t *)edev;
        qede_phys_mem_entry_t *entry;
        ddi_dma_handle_t *dma_handle = NULL;
        uint_t type = (is_post == false) ? DDI_DMA_SYNC_FORDEV :
            DDI_DMA_SYNC_FORKERNEL;

        mutex_enter(&qede->phys_mem_list.lock);

        /* LINTED E_BAD_PTR_CAST_ALIGN */
        QEDE_LIST_FOR_EACH_ENTRY(entry, &qede->phys_mem_list.head,
            qede_phys_mem_entry_t, list_entry) {
                if (entry->paddr == addr) {
                        dma_handle = &entry->dma_handle;
                }
        }

        if (dma_handle == NULL) {
                qede_print_err("!%s(%d): addr %p not found in list",
                    __func__, qede->instance, addr);
                mutex_exit(&qede->phys_mem_list.lock);
                return;
        } else {
                (void) ddi_dma_sync(*dma_handle,
                    0 /* offset into the mem block */,
                    size, type);
        }

        mutex_exit(&qede->phys_mem_list.lock);
}

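/*
 * kmem_zalloc() wrapper for ecore.  Each allocation is tracked on the
 * driver's mem_list so that qede_osal_free() and qede_osal_cleanup()
 * can later locate and release it.
 */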
void *
qede_osal_zalloc(struct ecore_dev *edev, int flags, size_t size)
{
        qede_t *qede = (qede_t *)edev;
        qede_mem_list_entry_t *new_entry;
        void *buf;

        if ((new_entry = kmem_zalloc(sizeof (qede_mem_list_entry_t), flags))
            == NULL) {
                qede_print_err("%s(%d): Failed to alloc new list entry",
                    __func__, qede->instance);
                return (NULL);
        }

        if ((buf = kmem_zalloc(size, flags)) == NULL) {
                qede_print_err("%s(%d): Failed to alloc mem, size %d",
                    __func__, qede->instance, size);
                kmem_free(new_entry, sizeof (qede_mem_list_entry_t));
                return (NULL);
        }

        new_entry->size = size;
        new_entry->buf = buf;

        mutex_enter(&qede->mem_list.mem_list_lock);
        QEDE_LIST_ADD(&new_entry->mem_entry, &qede->mem_list.mem_list_head);
        mutex_exit(&qede->mem_list.mem_list_lock);

        return (buf);
}

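/*
 * Same as qede_osal_zalloc(), but the returned buffer is not zeroed.
 */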
void *
qede_osal_alloc(struct ecore_dev *edev, int flags, size_t size)
{
        qede_t *qede = (qede_t *)edev;
        qede_mem_list_entry_t *new_entry;
        void *buf;

        if ((new_entry = kmem_zalloc(sizeof (qede_mem_list_entry_t), flags))
            == NULL) {
                qede_print_err("%s(%d): Failed to alloc new list entry",
                    __func__, qede->instance);
                return (NULL);
        }

        if ((buf = kmem_alloc(size, flags)) == NULL) {
                qede_print_err("%s(%d): Failed to alloc %d bytes",
                    __func__, qede->instance, size);
                kmem_free(new_entry, sizeof (qede_mem_list_entry_t));
                return (NULL);
        }

        new_entry->size = size;
        new_entry->buf = buf;

        mutex_enter(&qede->mem_list.mem_list_lock);
        QEDE_LIST_ADD(&new_entry->mem_entry, &qede->mem_list.mem_list_head);
        mutex_exit(&qede->mem_list.mem_list_lock);

        return (buf);
}

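/*
 * Release a buffer obtained from qede_osal_zalloc()/qede_osal_alloc()
 * and drop its tracking entry from the mem_list.
 */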
void
qede_osal_free(struct ecore_dev *edev, void *addr)
{
        qede_t *qede = (qede_t *)edev;
        qede_mem_list_entry_t *mem_entry;

        mutex_enter(&qede->mem_list.mem_list_lock);

        /* LINTED E_BAD_PTR_CAST_ALIGN */
        QEDE_LIST_FOR_EACH_ENTRY(mem_entry, &qede->mem_list.mem_list_head,
            qede_mem_list_entry_t, mem_entry) {
                if (mem_entry->buf == addr) {
                        QEDE_LIST_REMOVE(&mem_entry->mem_entry,
                            &qede->mem_list.mem_list_head);
                        kmem_free(addr, mem_entry->size);
                        kmem_free(mem_entry, sizeof (qede_mem_list_entry_t));
                        break;
                }
        }

        mutex_exit(&qede->mem_list.mem_list_lock);
}

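/*
 * Allocate a physically contiguous, DMA-able block for ecore: allocate
 * a DMA handle, the backing memory and a single-cookie binding, then
 * track the mapping on phys_mem_list.  Returns the kernel virtual
 * address and stores the device (physical) address in *paddr.
 */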
/*
 * @VB: What are the alignment requirements here ??
 */
void *
qede_osal_dma_alloc_coherent(struct ecore_dev *edev, dma_addr_t *paddr,
    size_t size)
{
        qede_t *qede = (qede_t *)edev;
        qede_phys_mem_entry_t *new_entry;
        ddi_dma_handle_t *dma_handle;
        ddi_acc_handle_t *dma_acc_handle;
        ddi_dma_cookie_t cookie;
        int ret;
        caddr_t pbuf;
        unsigned int count;

        memset(&cookie, 0, sizeof (cookie));

        if ((new_entry =
            kmem_zalloc(sizeof (qede_phys_mem_entry_t), KM_NOSLEEP)) == NULL) {
                qede_print_err("%s(%d): Failed to alloc new list entry",
                    __func__, qede->instance);
                return (NULL);
        }

        dma_handle = &new_entry->dma_handle;
        dma_acc_handle = &new_entry->dma_acc_handle;

        if ((ret =
            ddi_dma_alloc_handle(qede->dip, &qede_gen_buf_dma_attr,
            DDI_DMA_DONTWAIT,
            NULL, dma_handle)) != DDI_SUCCESS) {
                qede_print_err("%s(%d): Failed to alloc dma handle",
                    __func__, qede->instance);
                qede_stacktrace(qede);
                goto free;
        }

        if ((ret = ddi_dma_mem_alloc(*dma_handle, size, &qede_desc_acc_attr,
            DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &pbuf, &size,
            dma_acc_handle)) != DDI_SUCCESS) {
                qede_print_err("%s(%d): Failed to alloc dma mem %d bytes",
                    __func__, qede->instance, size);
                qede_stacktrace(qede);
                goto free_hdl;
        }

        if ((ret = ddi_dma_addr_bind_handle(*dma_handle, NULL, pbuf, size,
            DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
            DDI_DMA_DONTWAIT, NULL, &cookie, &count)) != DDI_DMA_MAPPED) {
                qede_print("!%s(%d): failed to bind dma addr to handle,"
                    " ret %d",
                    __func__, qede->instance, ret);
                goto free_dma_mem;
        }

        if (count != 1) {
                qede_print("%s(%d): ncookies = %d for phys addr %p, "
                    "discard dma buffer",
                    __func__, qede->instance, count, &cookie.dmac_laddress);
                goto free_dma_mem;
        }

        new_entry->size = size;
        new_entry->virt_addr = pbuf;

        new_entry->paddr = (void *)cookie.dmac_laddress;

        *paddr = (dma_addr_t)new_entry->paddr;

        mutex_enter(&qede->phys_mem_list.lock);
        QEDE_LIST_ADD(&new_entry->list_entry, &qede->phys_mem_list.head);
        mutex_exit(&qede->phys_mem_list.lock);

        return (new_entry->virt_addr);

free_dma_mem:
        ddi_dma_mem_free(dma_acc_handle);
free_hdl:
        ddi_dma_free_handle(dma_handle);
free:
        kmem_free(new_entry, sizeof (qede_phys_mem_entry_t));
        return (NULL);
}

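/*
 * Free a block allocated by qede_osal_dma_alloc_coherent(): look it up
 * by virtual address, unbind and release the DDI handles and remove it
 * from phys_mem_list.
 */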
void
qede_osal_dma_free_coherent(struct ecore_dev *edev, void *vaddr,
    dma_addr_t paddr, size_t size)
{
        qede_t *qede = (qede_t *)edev;
        qede_phys_mem_entry_t *entry;

        mutex_enter(&qede->phys_mem_list.lock);

        /* LINTED E_BAD_PTR_CAST_ALIGN */
        QEDE_LIST_FOR_EACH_ENTRY(entry, &qede->phys_mem_list.head,
            qede_phys_mem_entry_t, list_entry) {
                if (entry->virt_addr == vaddr) {
                        QEDE_LIST_REMOVE(&entry->list_entry,
                            &qede->phys_mem_list.head);
                        ddi_dma_unbind_handle(entry->dma_handle);
                        ddi_dma_mem_free(&entry->dma_acc_handle);
                        ddi_dma_free_handle(&entry->dma_handle);
                        kmem_free(entry, sizeof (qede_phys_mem_entry_t));
                        break;
                }
        }

        mutex_exit(&qede->phys_mem_list.lock);
}

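/*
 * Map the MFW-reported media type to the corresponding GLD media
 * constant (GLDM_*).
 */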
static int
qede_get_port_type(uint32_t media_type)
{
        uint32_t port_type;

        switch (media_type) {
        case MEDIA_SFPP_10G_FIBER:
        case MEDIA_SFP_1G_FIBER:
        case MEDIA_XFP_FIBER:
        case MEDIA_KR:
                port_type = GLDM_FIBER;
                break;
        case MEDIA_DA_TWINAX:
                port_type = GLDM_BNC; /* Check? */
                break;
        case MEDIA_BASE_T:
                port_type = GLDM_TP;
                break;
        case MEDIA_NOT_PRESENT:
        case MEDIA_UNSPECIFIED:
        default:
                port_type = GLDM_UNKNOWN;
                break;
        }
        return (port_type);
}

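/*
 * Collect the current link configuration from ecore/MFW into the
 * driver's qede_link_cfg structure: port type, link state, speed and
 * duplex, plus the supported, advertised and link-partner capabilities
 * and the pause (flow control) settings.
 */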
void
qede_get_link_info(struct ecore_hwfn *hwfn, struct qede_link_cfg *lnkCfg)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        uint32_t media_type;
        struct ecore_mcp_link_state lnk_state;
        struct ecore_mcp_link_params lnk_params;
        struct ecore_mcp_link_capabilities lnk_caps;

        ecore_mcp_get_media_type(edev, &media_type);
        lnkCfg->port = qede_get_port_type(media_type);

        memcpy(&lnk_state, ecore_mcp_get_link_state(hwfn),
            sizeof (lnk_state));
        memcpy(&lnk_params, ecore_mcp_get_link_params(hwfn),
            sizeof (lnk_params));
        memcpy(&lnk_caps, ecore_mcp_get_link_capabilities(hwfn),
            sizeof (lnk_caps));

        if (lnk_state.link_up) {
                lnkCfg->link_up = B_TRUE;
                lnkCfg->speed = lnk_state.speed;
                lnkCfg->duplex = DUPLEX_FULL;
        }

        if (lnk_params.speed.autoneg) {
                lnkCfg->supp_capab.autoneg = B_TRUE;
                lnkCfg->adv_capab.autoneg = B_TRUE;
        }
        if (lnk_params.speed.autoneg ||
            (lnk_params.pause.forced_rx && lnk_params.pause.forced_tx)) {
                lnkCfg->supp_capab.asym_pause = B_TRUE;
                lnkCfg->adv_capab.asym_pause = B_TRUE;
        }
        if (lnk_params.speed.autoneg ||
            lnk_params.pause.forced_rx || lnk_params.pause.forced_tx) {
                lnkCfg->supp_capab.pause = B_TRUE;
                lnkCfg->adv_capab.pause = B_TRUE;
        }

        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
                lnkCfg->adv_capab.param_10000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
                lnkCfg->adv_capab.param_25000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
                lnkCfg->adv_capab.param_40000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) {
                lnkCfg->adv_capab.param_50000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
                lnkCfg->adv_capab.param_100000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
                lnkCfg->adv_capab.param_1000fdx = B_TRUE;
                lnkCfg->adv_capab.param_1000hdx = B_TRUE;
        }

        lnkCfg->autoneg = lnk_params.speed.autoneg;

        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
                lnkCfg->supp_capab.param_10000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
                lnkCfg->supp_capab.param_25000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
                lnkCfg->supp_capab.param_40000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) {
                lnkCfg->supp_capab.param_50000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
                lnkCfg->supp_capab.param_100000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
                lnkCfg->supp_capab.param_1000fdx = B_TRUE;
                lnkCfg->supp_capab.param_1000hdx = B_TRUE;
        }

        if (lnk_params.pause.autoneg) {
                lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_AUTONEG_ENABLE;
        }
        if (lnk_params.pause.forced_rx) {
                lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_RX_ENABLE;
        }
        if (lnk_params.pause.forced_tx) {
                lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_TX_ENABLE;
        }

        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_1G_HD) {
                lnkCfg->rem_capab.param_1000hdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_1G_FD) {
                lnkCfg->rem_capab.param_1000fdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_10G) {
                lnkCfg->rem_capab.param_10000fdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_40G) {
                lnkCfg->rem_capab.param_40000fdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_50G) {
                lnkCfg->rem_capab.param_50000fdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_100G) {
                lnkCfg->rem_capab.param_100000fdx = B_TRUE;
        }

        if (lnk_state.an_complete) {
                lnkCfg->rem_capab.autoneg = B_TRUE;
        }

        if (lnk_state.partner_adv_pause) {
                lnkCfg->rem_capab.pause = B_TRUE;
        }
        if (lnk_state.partner_adv_pause ==
            ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
            lnk_state.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
                lnkCfg->rem_capab.asym_pause = B_TRUE;
        }
}

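/*
 * OSAL hook invoked by ecore on a link state change: refresh the
 * driver's cached link properties and notify the MAC layer via
 * mac_link_update().
 */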
void
qede_osal_link_update(struct ecore_hwfn *hwfn)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        struct qede_link_cfg link_cfg;

        memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
        qede_get_link_info(hwfn, &link_cfg);

        if (link_cfg.duplex == DUPLEX_FULL) {
                qede->props.link_duplex = DUPLEX_FULL;
        } else {
                qede->props.link_duplex = DUPLEX_HALF;
        }

        if (!link_cfg.link_up) {
                qede_print("!%s(%d): Link marked down",
                    __func__, qede->instance);
                qede->params.link_state = 0;
                qede->props.link_duplex = B_FALSE;
                qede->props.link_speed = 0;
                qede->props.tx_pause = B_FALSE;
                qede->props.rx_pause = B_FALSE;
                qede->props.uptime = 0;
                mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
        } else if (link_cfg.link_up) {
                qede_print("!%s(%d): Link marked up",
                    __func__, qede->instance);
                qede->params.link_state = 1;
                qede->props.link_speed = link_cfg.speed;
                qede->props.link_duplex = link_cfg.duplex;
                qede->props.tx_pause = (link_cfg.pause_cfg &
                    QEDE_LINK_PAUSE_TX_ENABLE) ? B_TRUE : B_FALSE;
                qede->props.rx_pause = (link_cfg.pause_cfg &
                    QEDE_LINK_PAUSE_RX_ENABLE) ? B_TRUE : B_FALSE;
                qede->props.uptime = ddi_get_time();
                mac_link_update(qede->mac_handle, LINK_STATE_UP);
        }
}

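/*
 * log2_align() rounds n up to the nearest power of two; LOG2() returns
 * the integer (floor) base-2 logarithm of v.
 */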
unsigned long
log2_align(unsigned long n)
{
        unsigned long ret = n ? 1 : 0;
        unsigned long _n = n >> 1;

        while (_n) {
                _n >>= 1;
                ret <<= 1;
        }

        if (ret < n) {
                ret <<= 1;
        }

        return (ret);
}

u32
LOG2(u32 v)
{
        u32 r = 0;

        while (v >>= 1) {
                r++;
        }
        return (r);
}

int
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_pci_find_ext_capab(struct ecore_dev *edev, u16 pcie_id)
{
        int offset = 0;

        return (offset);
}

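/*
 * BAR register accessors used by ecore: offsets are applied to the
 * driver's mapped BAR0 (registers) and BAR2 (doorbells) base addresses
 * and accessed through the corresponding DDI access handles.
 */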
void
qede_osal_pci_write32(struct ecore_hwfn *hwfn, u32 offset, u32 val)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        u64 addr = qede->pci_bar0_base;

        addr += offset;

        ddi_put32(qede->regs_handle, (u32 *)addr, val);
}

void
qede_osal_pci_write16(struct ecore_hwfn *hwfn, u32 offset, u16 val)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        u64 addr = qede->pci_bar0_base;

        addr += offset;

        ddi_put16(qede->regs_handle, (u16 *)addr, val);
}

u32
qede_osal_pci_read32(struct ecore_hwfn *hwfn, u32 offset)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        u32 val = 0;
        u64 addr = qede->pci_bar0_base;

        addr += offset;

        val = ddi_get32(qede->regs_handle, (u32 *)addr);

        return (val);
}

void
qede_osal_pci_bar2_write32(struct ecore_hwfn *hwfn, u32 offset, u32 val)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        u64 addr = qede->pci_bar2_base;

        addr += offset;
        ddi_put32(qede->doorbell_handle, (u32 *)addr, val);
}

u32
qede_osal_direct_reg_read32(struct ecore_hwfn *hwfn, void *addr)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;

        return (ddi_get32(qede->regs_handle, (u32 *)addr));
}

void
qede_osal_direct_reg_write32(struct ecore_hwfn *hwfn, void *addr, u32 value)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;

        ddi_put32(qede->regs_handle, (u32 *)addr, value);
}

u32 *
qede_osal_reg_addr(struct ecore_hwfn *hwfn, u32 addr)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;

        return ((u32 *)(qede->pci_bar0_base + addr));
}

void
qede_osal_pci_read_config_byte(struct ecore_dev *edev, u32 addr, u8 *val)
{
        qede_t *qede = (qede_t *)edev;

        *val = pci_config_get8(qede->pci_cfg_handle, (off_t)addr);
}

void
qede_osal_pci_read_config_word(struct ecore_dev *edev, u32 addr, u16 *val)
{
        qede_t *qede = (qede_t *)edev;

        *val = pci_config_get16(qede->pci_cfg_handle, (off_t)addr);
}

void
qede_osal_pci_read_config_dword(struct ecore_dev *edev, u32 addr, u32 *val)
{
        qede_t *qede = (qede_t *)edev;

        *val = pci_config_get32(qede->pci_cfg_handle, (off_t)addr);
}

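/*
 * Driver logging helpers: qede_print() logs at CE_NOTE severity and
 * qede_print_err() at CE_WARN.
 */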
void
qede_print(char *format, ...)
{
        va_list ap;

        va_start(ap, format);
        vcmn_err(CE_NOTE, format, ap);
        va_end(ap);
}

void
qede_print_err(char *format, ...)
{
        va_list ap;

        va_start(ap, format);
        vcmn_err(CE_WARN, format, ap);
        va_end(ap);
}

/*
 * Check if any mem/dma entries are left behind
 * after unloading the ecore. If found
 * then make sure they are freed
 */
int
qede_osal_cleanup(qede_t *qede)
{
        qede_mem_list_entry_t *entry = NULL;
        qede_mem_list_entry_t *temp = NULL;
        qede_phys_mem_entry_t *entry_phys;
        qede_phys_mem_entry_t *temp_phys;

        /*
         * Check for misplaced mem. blocks(if any)
         */
        mutex_enter(&qede->mem_list.mem_list_lock);

        if (!QEDE_LIST_EMPTY(&qede->mem_list.mem_list_head)) {
                /*
                 * Something went wrong either in ecore
                 * or the osal mem management routines
                 * and the mem entry was not freed
                 */
                qede_print_err("!%s(%d): Mem entries left behind",
                    __func__, qede->instance);

                QEDE_LIST_FOR_EACH_ENTRY_SAFE(entry,
                    temp,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    &qede->mem_list.mem_list_head,
                    mem_entry,
                    qede_mem_list_entry_t) {
                        qede_print("!%s(%d): Cleaning-up entry %p",
                            __func__, qede->instance, entry);
                        QEDE_LIST_REMOVE(&entry->mem_entry,
                            &qede->mem_list.mem_list_head);
                        if (entry->buf) {
                                kmem_free(entry->buf, entry->size);
                                kmem_free(entry,
                                    sizeof (qede_mem_list_entry_t));
                        }
                }
        }

        mutex_exit(&qede->mem_list.mem_list_lock);

        /*
         * Check for misplaced dma blocks (if any)
         */
        mutex_enter(&qede->phys_mem_list.lock);

        if (!QEDE_LIST_EMPTY(&qede->phys_mem_list.head)) {
                qede_print("!%s(%d): Dma entries left behind",
                    __func__, qede->instance);

                QEDE_LIST_FOR_EACH_ENTRY_SAFE(entry_phys,
                    temp_phys,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    &qede->phys_mem_list.head,
                    list_entry,
                    qede_phys_mem_entry_t) {
                        qede_print("!%s(%d): Cleaning-up entry %p",
                            __func__, qede->instance, entry_phys);
                        QEDE_LIST_REMOVE(&entry_phys->list_entry,
                            &qede->phys_mem_list.head);

                        if (entry_phys->virt_addr) {
                                ddi_dma_unbind_handle(entry_phys->dma_handle);
                                ddi_dma_mem_free(&entry_phys->dma_acc_handle);
                                ddi_dma_free_handle(&entry_phys->dma_handle);
                                kmem_free(entry_phys,
                                    sizeof (qede_phys_mem_entry_t));
                        }
                }
        }

        mutex_exit(&qede->phys_mem_list.lock);

        return (0);
}

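/*
 * ecore error recovery hook -- not implemented; just log a warning.
 */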
void
qede_osal_recovery_handler(struct ecore_hwfn *hwfn)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;

        cmn_err(CE_WARN, "!%s(%d):Not implemented !",
            __func__, qede->instance);
}

enum _ecore_status_t
qede_osal_iov_vf_acquire(struct ecore_hwfn *p_hwfn, int vf_id)
{
        return (ECORE_SUCCESS);
}

void
qede_osal_pci_write_config_word(struct ecore_dev *dev, u32 addr, u16 pcie_id)
{
        qede_t *qede = (qede_t *)dev;
        ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;

        pci_config_put16(pci_cfg_handle, (off_t)addr, pcie_id);
}

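/*
 * The remaining OSAL hooks below are currently stubs: they do nothing
 * or return fixed values.
 */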
void *
qede_osal_valloc(struct ecore_dev *dev, u32 size)
{
        void *ptr = 0;

        return (ptr);
}

void
qede_osal_vfree(struct ecore_dev *dev, void *mem)
{
}

int
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_pci_find_capability(struct ecore_dev *dev, u16 pcie_id)
{
        return 1;
}

void
qede_osal_poll_mode_dpc(struct ecore_hwfn *p_hwfn)
{
}

int
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_bitmap_weight(unsigned long *bitmap, uint32_t nbits)
{
        uint32_t count = 0, temp = *bitmap;

        return count;
}

void
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_mfw_tlv_req(struct ecore_hwfn *p_hwfn)
{
}

u32
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_crc32(u32 crc, u8 *buf, u64 length)
{
        return 1;
}

void
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_hw_info_change(struct ecore_hwfn *p_hwfn, int change)
{
}

void
/* LINTED E_FUNC_ARG_UNUSED */
OSAL_CRC8_POPULATE(u8 *cdu_crc8_table, u8 polynomial)
{
}

u8
/* LINTED E_FUNC_ARG_UNUSED */
OSAL_CRC8(u8 *cdu_crc8_table, u8 *data_to_crc, int data_to_crc_len,
    u8 init_value)
{
        return (0);
}

void
/* LINTED E_FUNC_ARG_UNUSED */
OSAL_DPC_SYNC(struct ecore_hwfn *p_hwfn)
{
        /* Do nothing right now. */
}