/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
lb_property_t lb_normal = {normal, "normal", hxge_lb_normal};
lb_property_t lb_mac10g = {internal, "mac10g", hxge_lb_mac10g};

uint32_t hxge_lb_dbg = 1;

extern uint32_t hxge_jumbo_frame_size;

static void hxge_rtrace_ioctl(p_hxge_t, queue_t *, mblk_t *, struct iocblk *);

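/*
 * Reset the device: mask the hardware interrupts, reprogram the link if
 * the device is resuming from a suspend, reinitialize the VMAC, and
 * unmask the interrupts again.
 */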
void
hxge_global_reset(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_global_reset"));

        (void) hxge_intr_hw_disable(hxgep);

        if (hxgep->suspended)
                (void) hxge_link_init(hxgep);

        (void) hxge_vmac_init(hxgep);

        (void) hxge_intr_hw_enable(hxgep);

        HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_global_reset"));
}

void
hxge_hw_id_init(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init"));

        /*
         * Initialize the frame size to either standard "1500 + 38" or
         * jumbo. The user may tune the frame size through the "mtu"
         * parameter using "dladm set-linkprop".
         */
        hxgep->vmac.minframesize = MIN_FRAME_SIZE;
        hxgep->vmac.maxframesize = HXGE_DEFAULT_MTU + MTU_TO_FRAME_SIZE;
        if (hxgep->param_arr[param_accept_jumbo].value)
                hxgep->vmac.maxframesize = (uint16_t)hxge_jumbo_frame_size;

        HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init: maxframesize %d",
            hxgep->vmac.maxframesize));
        HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_id_init"));
}

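/*
 * One-time initialization of the hardware shared by all blades.  The
 * COMMON_INIT_DONE flag, protected by the configuration lock, ensures
 * that the PFC hardware reset is performed only once per device.
 */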
void
hxge_hw_init_niu_common(p_hxge_t hxgep)
{
        p_hxge_hw_list_t hw_p;

        HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_init_niu_common"));

        if ((hw_p = hxgep->hxge_hw_p) == NULL) {
                return;
        }

        MUTEX_ENTER(&hw_p->hxge_cfg_lock);
        if (hw_p->flags & COMMON_INIT_DONE) {
                HXGE_DEBUG_MSG((hxgep, MOD_CTL, "hxge_hw_init_niu_common"
                    " already done for dip $%p exiting", hw_p->parent_devp));
                MUTEX_EXIT(&hw_p->hxge_cfg_lock);
                return;
        }

        hw_p->flags = COMMON_INIT_START;
        HXGE_DEBUG_MSG((hxgep, MOD_CTL,
            "hxge_hw_init_niu_common Started for device id %x",
            hw_p->parent_devp));

        (void) hxge_pfc_hw_reset(hxgep);
        hw_p->flags = COMMON_INIT_DONE;
        MUTEX_EXIT(&hw_p->hxge_cfg_lock);

        HXGE_DEBUG_MSG((hxgep, MOD_CTL,
            "hxge_hw_init_niu_common Done for device id %x",
            hw_p->parent_devp));
        HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_init_niu_common"));
}

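/*
 * Main interrupt service routine.  Reads the logical-device flag vectors
 * for the interrupting group, dispatches the handler of every logical
 * device whose flag bits are set, and re-arms the group when done.
 */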
uint_t
hxge_intr(caddr_t arg1, caddr_t arg2)
{
        p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
        p_hxge_t hxgep = (p_hxge_t)arg2;
        uint8_t ldv;
        hpi_handle_t handle;
        p_hxge_ldgv_t ldgvp;
        p_hxge_ldg_t ldgp = NULL, t_ldgp = NULL;
        p_hxge_ldv_t t_ldvp;
        uint32_t vector0 = 0, vector1 = 0;
        int j, nldvs;
        hpi_status_t rs = HPI_SUCCESS;

        /*
         * DDI interface returns second arg as NULL
         */
        if ((arg2 == NULL) || ((void *)ldvp->hxgep != arg2)) {
                hxgep = ldvp->hxgep;
        }

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr"));

        if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
                HXGE_ERROR_MSG((hxgep, INT_CTL,
                    "<== hxge_intr: not initialized"));
                return (DDI_INTR_UNCLAIMED);
        }

        ldgvp = hxgep->ldgvp;

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: ldgvp $%p", ldgvp));

        if (ldvp == NULL && ldgvp)
                t_ldvp = ldvp = ldgvp->ldvp;
        if (ldvp)
                ldgp = t_ldgp = ldvp->ldgp;

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
            "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));

        if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
                HXGE_ERROR_MSG((hxgep, INT_CTL, "==> hxge_intr: "
                    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
                HXGE_ERROR_MSG((hxgep, INT_CTL, "<== hxge_intr: not ready"));
                return (DDI_INTR_UNCLAIMED);
        }

        /*
         * This interrupt handler will have to go through
         * all the logical devices to find out which
         * logical device interrupts us and then call
         * its handler to process the events.
         */
        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        t_ldgp = ldgp;
        t_ldvp = ldgp->ldvp;
        nldvs = ldgp->nldvs;

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: #ldvs %d #intrs %d",
            nldvs, ldgvp->ldg_intrs));
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_intr(%d): #ldvs %d", t_ldgp->ldg, nldvs));

        /*
         * Get this group's flag bits.
         */
        t_ldgp->interrupted = B_FALSE;
        rs = hpi_ldsv_ldfs_get(handle, t_ldgp->ldg, &vector0, &vector1);
        if (rs != HPI_SUCCESS)
                return (DDI_INTR_UNCLAIMED);

        if (!vector0 && !vector1) {
                HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
                    "no interrupts on group %d", t_ldgp->ldg));
                return (DDI_INTR_UNCLAIMED);
        }

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
            "vector0 0x%llx vector1 0x%llx", vector0, vector1));

        t_ldgp->interrupted = B_TRUE;
        nldvs = t_ldgp->nldvs;

        /*
         * Process all devices that share this group.
         */
        for (j = 0; j < nldvs; j++, t_ldvp++) {
                /*
                 * Call device's handler if flag bits are on.
                 */
                ldv = t_ldvp->ldv;
                if (LDV_ON(ldv, vector0) | LDV_ON(ldv, vector1)) {
                        HXGE_DEBUG_MSG((hxgep, INT_CTL,
                            "==> hxge_intr: calling device %d"
                            " #ldvs %d #intrs %d", j, nldvs,
                            ldgvp->ldg_intrs));
                        (void) (t_ldvp->ldv_intr_handler)(
                            (caddr_t)t_ldvp, arg2);
                }
        }

        /*
         * Re-arm group interrupts
         */
        if (t_ldgp->interrupted) {
                HXGE_DEBUG_MSG((hxgep, INT_CTL,
                    "==> hxge_intr: arm group %d", t_ldgp->ldg));
                (void) hpi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
                    t_ldgp->arm, t_ldgp->ldg_timer);
        }

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr"));
        return (DDI_INTR_CLAIMED);
}

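/*
 * Decode the PEU interrupt status register, bump the matching kstat
 * counter for every error bit that is set, and report the error through
 * the fault management framework.  Because the PCI-E errors cannot be
 * cleared, the whole PEU interrupt is masked off first.
 */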
hxge_status_t
hxge_peu_handle_sys_errors(p_hxge_t hxgep)
{
        hpi_handle_t handle;
        p_hxge_peu_sys_stats_t statsp;
        peu_intr_stat_t stat;

        handle = hxgep->hpi_handle;
        statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;

        HXGE_REG_RD32(handle, PEU_INTR_STAT, &stat.value);

        /*
         * The PCIE errors are unrecoverable and cannot be cleared.
         * The only thing we can do here is to mask them off to prevent
         * continued interrupts.
         */
        HXGE_REG_WR32(handle, PEU_INTR_MASK, 0xffffffff);

        if (stat.bits.spc_acc_err) {
                statsp->spc_acc_err++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: spc_acc_err"));
        }

        if (stat.bits.tdc_pioacc_err) {
                statsp->tdc_pioacc_err++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: tdc_pioacc_err"));
        }

        if (stat.bits.rdc_pioacc_err) {
                statsp->rdc_pioacc_err++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: rdc_pioacc_err"));
        }

        if (stat.bits.pfc_pioacc_err) {
                statsp->pfc_pioacc_err++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: pfc_pioacc_err"));
        }

        if (stat.bits.vmac_pioacc_err) {
                statsp->vmac_pioacc_err++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: vmac_pioacc_err"));
        }

        if (stat.bits.cpl_hdrq_parerr) {
                statsp->cpl_hdrq_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: cpl_hdrq_parerr"));
        }

        if (stat.bits.cpl_dataq_parerr) {
                statsp->cpl_dataq_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: cpl_dataq_parerr"));
        }

        if (stat.bits.retryram_xdlh_parerr) {
                statsp->retryram_xdlh_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: retryram_xdlh_parerr"));
        }

        if (stat.bits.retrysotram_xdlh_parerr) {
                statsp->retrysotram_xdlh_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: retrysotram_xdlh_parerr"));
        }

        if (stat.bits.p_hdrq_parerr) {
                statsp->p_hdrq_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: p_hdrq_parerr"));
        }

        if (stat.bits.p_dataq_parerr) {
                statsp->p_dataq_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: p_dataq_parerr"));
        }

        if (stat.bits.np_hdrq_parerr) {
                statsp->np_hdrq_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: np_hdrq_parerr"));
        }

        if (stat.bits.np_dataq_parerr) {
                statsp->np_dataq_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: np_dataq_parerr"));
        }

        if (stat.bits.eic_msix_parerr) {
                statsp->eic_msix_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: eic_msix_parerr"));
        }

        if (stat.bits.hcr_parerr) {
                statsp->hcr_parerr++;
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_peu_handle_sys_errors: hcr_parerr"));
        }

        HXGE_FM_REPORT_ERROR(hxgep, 0, HXGE_FM_EREPORT_PEU_ERR);
        return (HXGE_OK);
}

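/*
 * System error interrupt handler.  It is entered either from an interrupt
 * (arg1 is the logical device) or from the hxge_check_hw_state() timer,
 * classifies the error as TDMC, RDMC, or PCI-E, and hands it to the
 * matching subsystem error handler.
 */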
/*ARGSUSED*/
uint_t
hxge_syserr_intr(caddr_t arg1, caddr_t arg2)
{
        p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
        p_hxge_t hxgep = (p_hxge_t)arg2;
        p_hxge_ldg_t ldgp = NULL;
        hpi_handle_t handle;
        dev_err_stat_t estat;

        if ((arg1 == NULL) && (arg2 == NULL)) {
                return (DDI_INTR_UNCLAIMED);
        }

        if ((arg2 == NULL) ||
            ((ldvp != NULL) && ((void *)ldvp->hxgep != arg2))) {
                if (ldvp != NULL) {
                        hxgep = ldvp->hxgep;
                }
        }

        HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
            "==> hxge_syserr_intr: arg2 $%p arg1 $%p", hxgep, ldvp));

        if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
                ldgp = ldvp->ldgp;
                if (ldgp == NULL) {
                        HXGE_ERROR_MSG((hxgep, SYSERR_CTL,
                            "<== hxge_syserr_intr(no logical group): "
                            "arg2 $%p arg1 $%p", hxgep, ldvp));
                        return (DDI_INTR_UNCLAIMED);
                }
        }

        /*
         * This interrupt handler is for system error interrupts.
         */
        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        estat.value = 0;
        (void) hpi_fzc_sys_err_stat_get(handle, &estat);
        HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
            "==> hxge_syserr_intr: device error 0x%016llx", estat.value));

        if (estat.bits.tdc_err0 || estat.bits.tdc_err1) {
                /* TDMC */
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_syserr_intr: device error - TDMC"));
                (void) hxge_txdma_handle_sys_errors(hxgep);
        } else if (estat.bits.rdc_err0 || estat.bits.rdc_err1) {
                /* RDMC */
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_syserr_intr: device error - RDMC"));
                (void) hxge_rxdma_handle_sys_errors(hxgep);
        } else if (estat.bits.vnm_pio_err1 || estat.bits.peu_err1) {
                /* PCI-E */
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_syserr_intr: device error - PCI-E"));

                /* kstats are updated here */
                (void) hxge_peu_handle_sys_errors(hxgep);

                if (estat.bits.peu_err1)
                        HXGE_FM_REPORT_ERROR(hxgep, 0,
                            HXGE_FM_EREPORT_PEU_ERR);

                if (estat.bits.vnm_pio_err1)
                        HXGE_FM_REPORT_ERROR(hxgep, 0,
                            HXGE_FM_EREPORT_PEU_VNM_PIO_ERR);
        } else if (estat.value != 0) {
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_syserr_intr: device error - unknown"));
        }

        if ((ldgp != NULL) && (ldvp != NULL) &&
            (ldgp->nldvs == 1) && !ldvp->use_timer) {
                (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
                    B_TRUE, ldgp->ldg_timer);
        }

        HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_syserr_intr"));
        return (DDI_INTR_CLAIMED);
}

void
hxge_intr_hw_enable(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_enable"));

        (void) hxge_intr_mask_mgmt_set(hxgep, B_TRUE);

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_enable"));
}

void
hxge_intr_hw_disable(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_disable"));

        (void) hxge_intr_mask_mgmt_set(hxgep, B_FALSE);

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_disable"));
}

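/*
 * Receive interrupt blanking: record the new tick value so the receive
 * packet interrupt routines pick it up later.  The count argument is
 * currently unused.
 */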
/*ARGSUSED*/
void
hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
{
        p_hxge_t hxgep = (p_hxge_t)arg;

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_hw_blank"));

        /*
         * Replace current ticks and counts for later
         * processing by the receive packet interrupt routines.
         */
        hxgep->intr_timeout = (uint16_t)ticks;

        HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_hw_blank"));
}

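/*
 * Stop the hardware: disable the transmit and receive VMAC, then place
 * both the transmit and receive DMA engines in the stopped state.
 */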
void
hxge_hw_stop(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_stop"));

        (void) hxge_tx_vmac_disable(hxgep);
        (void) hxge_rx_vmac_disable(hxgep);
        (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
        (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

        HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_stop"));
}

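/*
 * STREAMS ioctl dispatcher for the driver-private commands: TCAM get/put
 * and the register trace (rtrace) dump.  Unknown commands are nak'ed
 * with EINVAL.
 */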
void
hxge_hw_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
        int cmd;

        HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_hw_ioctl"));

        if (hxgep == NULL) {
                miocnak(wq, mp, 0, EINVAL);
                return;
        }

        iocp->ioc_error = 0;
        cmd = iocp->ioc_cmd;

        switch (cmd) {
        default:
                miocnak(wq, mp, 0, EINVAL);
                return;

        case HXGE_PUT_TCAM:
                hxge_put_tcam(hxgep, mp->b_cont);
                miocack(wq, mp, 0, 0);
                break;

        case HXGE_GET_TCAM:
                hxge_get_tcam(hxgep, mp->b_cont);
                miocack(wq, mp, 0, 0);
                break;

        case HXGE_RTRACE:
                hxge_rtrace_ioctl(hxgep, wq, mp, iocp);
                break;
        }
}

/*
 * 10G is the only loopback mode for Hydra.
 */
void
hxge_loopback_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
        p_lb_property_t lb_props;
        size_t size;
        int i;

        if (mp->b_cont == NULL) {
                miocnak(wq, mp, 0, EINVAL);
                return;
        }

        switch (iocp->ioc_cmd) {
        case LB_GET_MODE:
                HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_MODE command"));
                if (hxgep != NULL) {
                        *(lb_info_sz_t *)mp->b_cont->b_rptr =
                            hxgep->statsp->port_stats.lb_mode;
                        miocack(wq, mp, sizeof (hxge_lb_t), 0);
                } else
                        miocnak(wq, mp, 0, EINVAL);
                break;

        case LB_SET_MODE:
                HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_SET_LB_MODE command"));
                if (iocp->ioc_count != sizeof (uint32_t)) {
                        miocack(wq, mp, 0, 0);
                        break;
                }
                if ((hxgep != NULL) && hxge_set_lb(hxgep, wq, mp->b_cont)) {
                        miocack(wq, mp, 0, 0);
                } else {
                        miocnak(wq, mp, 0, EPROTO);
                }
                break;

        case LB_GET_INFO_SIZE:
                HXGE_DEBUG_MSG((hxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
                if (hxgep != NULL) {
                        size = sizeof (lb_normal) + sizeof (lb_mac10g);
                        *(lb_info_sz_t *)mp->b_cont->b_rptr = size;

                        HXGE_DEBUG_MSG((hxgep, IOC_CTL,
                            "HXGE_GET_LB_INFO command: size %d", size));
                        miocack(wq, mp, sizeof (lb_info_sz_t), 0);
                } else
                        miocnak(wq, mp, 0, EINVAL);
                break;

        case LB_GET_INFO:
                HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_INFO command"));
                if (hxgep != NULL) {
                        size = sizeof (lb_normal) + sizeof (lb_mac10g);
                        HXGE_DEBUG_MSG((hxgep, IOC_CTL,
                            "HXGE_GET_LB_INFO command: size %d", size));
                        if (size == iocp->ioc_count) {
                                i = 0;
                                lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
                                lb_props[i++] = lb_normal;
                                lb_props[i++] = lb_mac10g;

                                miocack(wq, mp, size, 0);
                        } else
                                miocnak(wq, mp, 0, EINVAL);
                } else {
                        miocnak(wq, mp, 0, EINVAL);
                        cmn_err(CE_NOTE, "hxge_hw_ioctl: invalid command 0x%x",
                            iocp->ioc_cmd);
                }
                break;
        }
}

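/*
 * Validate and apply a loopback mode request.  Returns B_TRUE and resets
 * the device on success, B_FALSE if the requested mode is already set or
 * is not supported.
 */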
/*ARGSUSED*/
boolean_t
hxge_set_lb(p_hxge_t hxgep, queue_t *wq, p_mblk_t mp)
{
        boolean_t status = B_TRUE;
        uint32_t lb_mode;
        lb_property_t *lb_info;

        HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_set_lb"));
        lb_mode = hxgep->statsp->port_stats.lb_mode;
        if (lb_mode == *(uint32_t *)mp->b_rptr) {
                cmn_err(CE_NOTE,
                    "hxge%d: Loopback mode already set (lb_mode %d).\n",
                    hxgep->instance, lb_mode);
                status = B_FALSE;
                goto hxge_set_lb_exit;
        }

        lb_mode = *(uint32_t *)mp->b_rptr;
        lb_info = NULL;

        /* 10G is the only loopback mode for Hydra */
        if (lb_mode == lb_normal.value)
                lb_info = &lb_normal;
        else if (lb_mode == lb_mac10g.value)
                lb_info = &lb_mac10g;
        else {
                cmn_err(CE_NOTE,
                    "hxge%d: Loopback mode not supported (mode %d).\n",
                    hxgep->instance, lb_mode);
                status = B_FALSE;
                goto hxge_set_lb_exit;
        }

        if (lb_mode == hxge_lb_normal) {
                if (hxge_lb_dbg) {
                        cmn_err(CE_NOTE,
                            "!hxge%d: Returning to normal operation",
                            hxgep->instance);
                }
                hxgep->statsp->port_stats.lb_mode = hxge_lb_normal;
                hxge_global_reset(hxgep);

                goto hxge_set_lb_exit;
        }

        hxgep->statsp->port_stats.lb_mode = lb_mode;

        if (hxge_lb_dbg)
                cmn_err(CE_NOTE, "!hxge%d: Adapter now in %s loopback mode",
                    hxgep->instance, lb_info->key);

        if (lb_info->lb_type == internal) {
                if (hxgep->statsp->port_stats.lb_mode == hxge_lb_mac10g)
                        hxgep->statsp->mac_stats.link_speed = 10000;
                else {
                        cmn_err(CE_NOTE,
                            "hxge%d: Loopback mode not supported (mode %d).\n",
                            hxgep->instance, lb_mode);
                        status = B_FALSE;
                        goto hxge_set_lb_exit;
                }
                hxgep->statsp->mac_stats.link_duplex = 2;
                hxgep->statsp->mac_stats.link_up = 1;
        }

        hxge_global_reset(hxgep);

hxge_set_lb_exit:
        HXGE_DEBUG_MSG((hxgep, DDI_CTL,
            "<== hxge_set_lb status = 0x%08x", status));

        return (status);
}

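/*
 * Periodic hardware health check, rescheduled through hxge_start_timer().
 * It checks for transmit hangs and, when the system error logical device
 * is polled rather than interrupt driven, calls hxge_syserr_intr()
 * directly.
 */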
void
hxge_check_hw_state(p_hxge_t hxgep)
{
        p_hxge_ldgv_t ldgvp;
        p_hxge_ldv_t t_ldvp;

        HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "==> hxge_check_hw_state"));

        MUTEX_ENTER(hxgep->genlock);

        hxgep->hxge_timerid = 0;
        if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
                goto hxge_check_hw_state_exit;
        }

        hxge_check_tx_hang(hxgep);

        ldgvp = hxgep->ldgvp;
        if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
                HXGE_ERROR_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
                    "NULL ldgvp (interrupt not ready)."));
                goto hxge_check_hw_state_exit;
        }

        t_ldvp = ldgvp->ldvp_syserr;
        if (!t_ldvp->use_timer) {
                HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
                    "ldgvp $%p t_ldvp $%p use_timer flag %d",
                    ldgvp, t_ldvp, t_ldvp->use_timer));
                goto hxge_check_hw_state_exit;
        }

        if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "Bad register acc handle"));
        }

        (void) hxge_syserr_intr((caddr_t)t_ldvp, (caddr_t)hxgep);

        hxgep->hxge_timerid = hxge_start_timer(hxgep, hxge_check_hw_state,
            HXGE_CHECK_TIMER);

hxge_check_hw_state_exit:
        MUTEX_EXIT(hxgep->genlock);

        HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state"));
}

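/*
 * Copy one block of entries from the global hpi_rtracebuf register trace
 * buffer into the caller's mblk and acknowledge the ioctl.
 */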
/*ARGSUSED*/
static void
hxge_rtrace_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
        ssize_t size;
        rtrace_t *rtp;
        mblk_t *nmp;
        uint32_t i, j;
        uint32_t start_blk;
        uint32_t base_entry;
        uint32_t num_entries;

        HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_rtrace_ioctl"));

        size = 1024;
        if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
                HXGE_DEBUG_MSG((hxgep, STR_CTL,
                    "malformed M_IOCTL MBLKL = %d size = %d",
                    MBLKL(mp->b_cont), size));
                miocnak(wq, mp, 0, EINVAL);
                return;
        }

        nmp = mp->b_cont;
        rtp = (rtrace_t *)nmp->b_rptr;
        start_blk = rtp->next_idx;
        num_entries = rtp->last_idx;
        base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;

        HXGE_DEBUG_MSG((hxgep, STR_CTL, "start_blk = %d\n", start_blk));
        HXGE_DEBUG_MSG((hxgep, STR_CTL, "num_entries = %d\n", num_entries));
        HXGE_DEBUG_MSG((hxgep, STR_CTL, "base_entry = %d\n", base_entry));

        rtp->next_idx = hpi_rtracebuf.next_idx;
        rtp->last_idx = hpi_rtracebuf.last_idx;
        rtp->wrapped = hpi_rtracebuf.wrapped;
        for (i = 0, j = base_entry; i < num_entries; i++, j++) {
                rtp->buf[i].ctl_addr = hpi_rtracebuf.buf[j].ctl_addr;
                rtp->buf[i].val_l32 = hpi_rtracebuf.buf[j].val_l32;
                rtp->buf[i].val_h32 = hpi_rtracebuf.buf[j].val_h32;
        }

        nmp->b_wptr = nmp->b_rptr + size;
        HXGE_DEBUG_MSG((hxgep, STR_CTL, "<== hxge_rtrace_ioctl"));
        miocack(wq, mp, (int)size, 0);
}