/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file manages the virtualization resources for a guest domain.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
/*
 * nxge_guest_regs_map
 *
 *	Map in a guest domain's register set(s).
 *
 * Arguments:
 *	nxge
 *
 * Notes:
 *	Note that we set <is_vraddr> to TRUE.
 *
 * Context:
 *	Guest domain
 */
static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
int
nxge_guest_regs_map(nxge_t *nxge)
{
	dev_regs_t	*regs;
	off_t		regsize;
	int		rv;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));

	/* So we can allocate properly-aligned memory. */
	nxge->niu_type = N2_NIU; /* Version 1.0 only */
	nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */

	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	regs = nxge->dev_regs;
	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
		return (NXGE_ERROR);
	}
	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp,
	    0, 0, &nxge_guest_register_access_attributes, &regs->nxge_regh);

	if (rv != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
		return (NXGE_ERROR);
	}
	nxge->npi_handle.regh = regs->nxge_regh;
	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_handle.is_vraddr = B_TRUE;
	nxge->npi_handle.function.instance = nxge->instance;
	nxge->npi_handle.function.function = nxge->function_num;
	nxge->npi_handle.nxgep = (void *)nxge;

	/* NPI_REG_ADD_HANDLE_SET() */
	nxge->npi_reg_handle.regh = regs->nxge_regh;
	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_reg_handle.is_vraddr = B_TRUE;
	nxge->npi_reg_handle.function.instance = nxge->instance;
	nxge->npi_reg_handle.function.function = nxge->function_num;
	nxge->npi_reg_handle.nxgep = (void *)nxge;

	/* NPI_VREG_ADD_HANDLE_SET() */
	nxge->npi_vreg_handle.regh = regs->nxge_regh;
	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
	nxge->npi_vreg_handle.function.instance = nxge->instance;
	nxge->npi_vreg_handle.function.function = nxge->function_num;
	nxge->npi_vreg_handle.nxgep = (void *)nxge;

	regs->nxge_vir_regp = regs->nxge_regp;
	regs->nxge_vir_regh = regs->nxge_regh;
	/*
	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
	 * or FCODE reg variables.
	 */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));

	return (NXGE_OK);
}

void
nxge_guest_regs_map_free(nxge_t *nxge)
{
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));

	if (nxge->dev_regs) {
		if (nxge->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxge, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
			nxge->dev_regs->nxge_regh = NULL;
		}

		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
}

#if defined(sun4v)

/*
 * -------------------------------------------------------------
 * Local prototypes
 * -------------------------------------------------------------
 */
static nxge_hio_dc_t *nxge_guest_dc_alloc(
	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);

static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
static void nxge_check_guest_state(nxge_hio_vr_t *);

/*
 * nxge_hio_vr_add
 *
 *	If we have been given a virtualization region (VR),
 *	then initialize it.
 *
 * Arguments:
 *	nxge
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_add(nxge_t *nxge)
{
	extern nxge_status_t nxge_mac_register(p_nxge_t);

	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t	*vr;
	nxge_hio_dc_t	*dc;

	int		*reg_val;
	uint_t		reg_len;
	uint8_t		vr_index;

	nxhv_vr_fp_t	*fp;
	uint64_t	vr_address, vr_size;
	uint32_t	cookie;

	nxhv_dc_fp_t	*tx, *rx;
	uint64_t	tx_map, rx_map;

	uint64_t	hv_rv;

	int		i;
	nxge_status_t	status;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));
	if (nhd->type == NXGE_HIO_TYPE_SERVICE) {
		/*
		 * Can't add a VR to the service domain from which we came.
		 */
		ASSERT(nhd->type == NXGE_HIO_TYPE_GUEST);
		return (DDI_FAILURE);
	}
	/*
	 * Get our HV cookie.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
		return (DDI_FAILURE);
	}

	cookie = (uint32_t)(reg_val[0]);
	ddi_prop_free(reg_val);
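
	/*
	 * The cookie taken from the "reg" property identifies this
	 * guest's VR to the hypervisor: it is handed to every vr/tx/rx
	 * hypervisor call made below (getinfo, get_map, etc.).
	 */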
	fp = &nhd->hio.vr;
	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "vr->getinfo() failed"));
		return (DDI_FAILURE);
	}
	/*
	 * In the guest domain, we can use any VR data structure
	 * we want, because we're not supposed to know which VR
	 * the service domain has allocated to us.
	 *
	 * In the current version, the least significant nybble of
	 * the cookie is the VR region, but that could change
	 * very easily.
	 *
	 * In the future, a guest may have more than one VR allocated
	 * to it, which is why we go through this exercise.
	 */
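
	/*
	 * Illustrative (hypothetical) example: under the current
	 * encoding, a cookie of 0x1a3 would imply VR region 3
	 * (0x1a3 & 0xf).  We deliberately do not rely on that here;
	 * we simply claim the first free slot in <nhd>'s VR table.
	 */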
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == 0) {
			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);
	if (vr_index == FUNC_VIR_MAX) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
		    "no VRs available"));
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
		    nxge->instance, cookie));
		return (DDI_FAILURE);
	}
	vr = &nhd->vr[vr_index];

	vr->nxge = (uintptr_t)nxge;
	vr->cookie = (uint32_t)cookie;
	vr->address = vr_address;
	vr->size = vr_size;
	vr->region = vr_index;
	/*
	 * This is redundant data, but useful nonetheless. It helps
	 * us to keep track of which RDCs & TDCs belong to us.
	 */
	if (nxge->tx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
	if (nxge->rx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);
	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_intr_init() failed"));
		return (DDI_FAILURE);
	}
	/*
	 * Now we find out which RDCs & TDCs have been allocated to us.
	 */
	tx = &nhd->hio.tx;
	if (tx->get_map) {
		/*
		 * The map we get back is a bitmap of the
		 * virtual Tx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "tx->get_map() failed"));
			return (DDI_FAILURE);
		}
		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & tx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_TRANSMIT_GROUP);
				if (dc == NULL) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (DDI_FAILURE);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}
	rx = &nhd->hio.rx;
	if (rx->get_map) {
		/*
		 * I repeat, the map we get back is a bitmap of
		 * the virtual Rx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "rx->get_map() failed"));
			return (DDI_FAILURE);
		}
		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & rx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_RECEIVE_GROUP);
				if (dc == NULL) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (DDI_FAILURE);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}
	status = nxge_mac_register(nxge);
	if (status != NXGE_OK) {
		cmn_err(CE_WARN, "nxge(%d): nxge_mac_register failed\n",
		    nxge->instance);
		return (DDI_FAILURE);
	}

	nxge->hio_vr = vr;	/* For faster lookups. */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));

	return (DDI_SUCCESS);
}

/*
 * nxge_guest_dc_alloc
 *
 *	Find a free nxge_hio_dc_t data structure.
 *
 * Arguments:
 *	nxge
 *	type	TRANSMIT or RECEIVE.
 *
 * Context:
 *	Guest domain
 */
static nxge_hio_dc_t *
nxge_guest_dc_alloc(nxge_t *nxge, nxge_hio_vr_t *vr, nxge_grp_type_t type)
{
	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t	*dc;
	int		limit, i;

	/*
	 * In the guest domain, there may be more than one VR,
	 * each of which will be using the same slots, or
	 * virtual channel numbers. So the <nhd>'s rdc & tdc
	 * tables must be shared.
	 */
	if (type == NXGE_TRANSMIT_GROUP) {
		dc = &nhd->tdc[0];
		limit = NXGE_MAX_TDCS;
	} else {
		dc = &nhd->rdc[0];
		limit = NXGE_MAX_RDCS;
	}

	MUTEX_ENTER(&nhd->lock);
	for (i = 0; i < limit; i++, dc++) {
		if (dc->vr == 0) {
			dc->vr = vr;
			dc->cookie = vr->cookie;
			MUTEX_EXIT(&nhd->lock);
			return (dc);
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NULL);
}
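
/*
 * nxge_hio_get_dc_htable_idx
 *
 *	Look up the hypervisor table index for a DMA channel: the LDG
 *	vector assigned to the channel, or -1 if the channel cannot
 *	be found.
 *
 * Context:
 *	Guest domain
 */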
int
nxge_hio_get_dc_htable_idx(nxge_t *nxge, vpc_type_t type, uint32_t channel)
{
	nxge_hio_dc_t	*dc;

	ASSERT(isLDOMguest(nxge));

	dc = nxge_grp_dc_find(nxge, type, channel);
	if (dc == NULL)
		return (-1);

	return (dc->ldg.vector);
}

/*
 * res_map_parse
 *
 *	Parse a resource map. The resources are DMA channels, receive
 *	or transmit, depending on <type>.
 *
 * Arguments:
 *	nxge
 *	type	Transmit or receive.
 *	res_map	The resource map to parse.
 *
 * Context:
 *	Guest domain
 */
static void
res_map_parse(nxge_t *nxge, nxge_grp_type_t type, uint64_t res_map)
{
	uint8_t		slots, mask, slot;
	int		first, count;

	nxge_hw_pt_cfg_t *hardware;
	nxge_grp_t	*group;
	/* Slots are numbered 0 - 7. */
	slots = (uint8_t)(res_map & 0xff);

	/* Count the bits in the bitmap, and record the first set slot. */
	for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
		if (slots & mask) {
			if (count == 0)
				first = slot;
			count++;
		}
		mask <<= 1;
	}
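
	/*
	 * Illustrative (hypothetical) map: res_map = 0x0c, binary
	 * 00001100, yields slots = 0x0c, first = 2, count = 2; i.e.,
	 * this guest owns the two contiguous virtual channels 2 and 3.
	 */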
	hardware = &nxge->pt_config.hw_config;
	group = (type == NXGE_TRANSMIT_GROUP) ?
	    nxge->tx_set.group[0] : nxge->rx_set.group[0];
	/*
	 * A guest domain has one Tx & one Rx group, so far.
	 * In the future, there may be more than one.
	 */
	if (type == NXGE_TRANSMIT_GROUP) {
		nxge_dma_pt_cfg_t *port = &nxge->pt_config;
		nxge_tdc_grp_t *tdc_grp = &nxge->pt_config.tdc_grps[0];

		hardware->tdc.start = first;
		hardware->tdc.count = count;
		hardware->tdc.owned = count;

		tdc_grp->start_tdc = first;
		tdc_grp->max_tdcs = (uint8_t)count;
		tdc_grp->grp_index = group->index;
		tdc_grp->map = slots;

		group->map = slots;

		/*
		 * Pointless in a guest domain. This bitmap is used
		 * in only one place: nxge_txc_init(),
		 * a service-domain-only function.
		 */
		port->tx_dma_map = slots;

		nxge->tx_set.owned.map |= slots;
	} else {
		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];

		hardware->start_rdc = first;
		hardware->max_rdcs = count;

		rdc_grp->start_rdc = (uint8_t)first;
		rdc_grp->max_rdcs = (uint8_t)count;
		rdc_grp->def_rdc = (uint8_t)first;

		rdc_grp->map = slots;
		group->map = slots;

		nxge->rx_set.owned.map |= slots;
	}
}

/*
 * nxge_hio_vr_release
 *
 *	Release a virtualization region (VR).
 *
 * Arguments:
 *	nxge
 *
 * Notes:
 *	We must uninitialize all DMA channels associated with the VR, too.
 *
 *	The service domain will re-initialize these DMA channels later.
 *	See nxge_hio.c:nxge_hio_share_free() for details.
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_release(nxge_t *nxge)
{
	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	int		vr_index;
	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));

	if (nxge->hio_vr == NULL) {
		return (NXGE_OK);
	}
	/*
	 * Uninitialize interrupts.
	 */
	nxge_hio_intr_uninit(nxge);

	/*
	 * Uninitialize the receive DMA channels.
	 */
	nxge_uninit_rxdma_channels(nxge);

	/*
	 * Uninitialize the transmit DMA channels.
	 */
	nxge_uninit_txdma_channels(nxge);

	/*
	 * Remove both groups. Assumption: only two groups!
	 */
	if (nxge->rx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
	if (nxge->tx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->tx_set.group[0]);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));
	/*
	 * Clean up.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
			nhd->vr[vr_index].nxge = 0;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NXGE_OK);
}

#if defined(NIU_LP_WORKAROUND)
/*
 * nxge_tdc_lp_conf
 *
 *	Configure the logical pages for a TDC.
 *
 * Arguments:
 *	nxge
 *	channel	The TDC to configure.
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_tdc_lp_conf(p_nxge_t nxge, int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	tx_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));

	ring = nxge->tx_rings->rings[channel];

	if (ring->hv_set) {
		/* This shouldn't happen. */
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
		return (NXGE_ERROR);
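
	/*
	 * Each DMA channel in the VR gets two logical pages: page 0
	 * maps the data buffers, page 1 maps the control buffers.
	 * Both are programmed through the hypervisor calls below.
	 */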
	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 0 data buf) hv: %d "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_buf_base_ioaddr_pp,
		    ring->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}
	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size, ra, size));
	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_cntl_base_ioaddr_pp,
		    ring->hv_tx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}
	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size, ra, size));

	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));

	return (NXGE_OK);
}

/*
 * nxge_rdc_lp_conf
 *
 *	Configure an RDC's logical pages.
 *
 * Arguments:
 *	nxge
 *	channel	The RDC to configure.
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_rdc_lp_conf(p_nxge_t nxge, int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	rx_rbr_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));

	ring = nxge->rx_rbr_rings->rbr_rings[channel];

	if (ring->hv_set) {
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
		return (NXGE_ERROR);
	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_buf_base_ioaddr_pp,
		    ring->hv_rx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}
	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size, ra, size));
	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_cntl_base_ioaddr_pp,
		    ring->hv_rx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}
	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size, ra, size));

	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));

	return (NXGE_OK);
}
#endif /* defined(NIU_LP_WORKAROUND) */

/*
 * This value is in milliseconds.
 */
#define	NXGE_GUEST_TIMER	500	/* 1/2 second, for now */
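
/*
 * The interval is converted with drv_usectohz(1000 * NXGE_GUEST_TIMER),
 * i.e. 500,000 microseconds, so the guest-state check runs roughly
 * twice per second.
 */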

/*
 * nxge_hio_start_timer
 *
 *	Start the timer which checks for Tx hangs.
 *
 * Arguments:
 *	nxge
 *
 * Notes:
 *	This function is called from nxge_attach().
 *
 *	This function kicks off the guest domain equivalent of
 *	nxge_check_hw_state(). It is called only once, from attach.
 *
 * Context:
 *	Guest domain
 */
void
nxge_hio_start_timer(nxge_t *nxge)
{
	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t	*vr;
	int		region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_timer_start"));

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Find our VR data structure. (We are currently assuming
	 * one VR per guest domain. That may change in the future.)
	 */
	for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) {
		if (nhd->vr[region].nxge == (uintptr_t)nxge)
			break;
	}

	MUTEX_EXIT(&nhd->lock);

	if (region == NXGE_VR_SR_MAX) {
		return;
	}

	vr = (nxge_hio_vr_t *)&nhd->vr[region];

	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
	    (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_timer_start"));
}

/*
 * nxge_check_guest_state
 *
 *	Essentially, check for Tx hangs. In the future, if we are
 *	polling the hardware, we may do so here.
 *
 * Arguments:
 *	vr	The virtualization region (VR) data structure.
 *
 * Notes:
 *	This function is the guest domain equivalent of
 *	nxge_check_hw_state(). Since we have no hardware to
 *	check, we simply call nxge_check_tx_hang().
 *
 * Context:
 *	Guest domain
 */
static void
nxge_check_guest_state(nxge_hio_vr_t *vr)
{
	nxge_t	*nxge = (nxge_t *)vr->nxge;

	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));

	MUTEX_ENTER(nxge->genlock);

	nxge->nxge_timerid = 0;

	if (nxge->nxge_mac_state == NXGE_MAC_STARTED) {
		nxge_check_tx_hang(nxge);

		nxge->nxge_timerid = timeout((void(*)(void *))
		    nxge_check_guest_state, (caddr_t)vr,
		    drv_usectohz(1000 * NXGE_GUEST_TIMER));
	}

nxge_check_guest_state_exit:
	MUTEX_EXIT(nxge->genlock);
	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
}

nxge_status_t
nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm)
{
	nxge_grp_t	*group;
	uint32_t	channel;
	nxge_hio_dc_t	*dc;
	nxge_ldg_t	*ldgp;

	/*
	 * Validate the state of the guest interface before
	 * proceeding.
	 */
	if (!isLDOMguest(nxge))
		return (NXGE_ERROR);
	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
		return (NXGE_ERROR);

	/*
	 * In a guest domain, we always and only deal with
	 * group 0 for an instance of nxge.
	 */
	group = nxge->rx_set.group[0];

	/*
	 * Look to arm the RDCs for the group.
	 */
	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
		if ((1 << channel) & group->map) {
			/*
			 * Get the RDC.
			 */
			dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel);
			if (dc == NULL)
				return (NXGE_ERROR);

			/*
			 * Get the RDC's ldg group.
			 */
			ldgp = &nxge->ldgvp->ldgp[dc->ldg.vector];
			if (ldgp == NULL)
				return (NXGE_ERROR);

			/*
			 * Set the state of the group.
			 */
			ldgp->arm = arm;

			nxge_hio_ldgimgn(nxge, ldgp);
		}
	}

	return (NXGE_OK);
}

nxge_status_t
nxge_hio_rdc_enable(p_nxge_t nxge)
{
	nxge_grp_t	*group;
	npi_handle_t	handle;
	uint32_t	channel;
	npi_status_t	rval;

	/*
	 * Validate the state of the guest interface before
	 * proceeding.
	 */
	if (!isLDOMguest(nxge))
		return (NXGE_ERROR);
	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
		return (NXGE_ERROR);

	/*
	 * In a guest domain, we always and only deal with
	 * group 0 for an instance of nxge.
	 */
	group = nxge->rx_set.group[0];

	/*
	 * Get the PIO handle.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxge);

	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
		/*
		 * If this channel is in the map, then enable
		 * it.
		 */
		if ((1 << channel) & group->map) {
			/*
			 * Enable the RDC and clear the empty bit.
			 */
			rval = npi_rxdma_cfg_rdc_enable(handle, channel);
			if (rval != NPI_SUCCESS)
				return (NXGE_ERROR);

			(void) npi_rxdma_channel_rbr_empty_clear(handle,
			    channel);
		}
	}

	return (NXGE_OK);
}

#endif /* defined(sun4v) */